git.donarmstrong.com Git - lilypond.git/commitdiff
Midi2ly: refactoring: initial support for instrument<->channel mapping.
author Jan Nieuwenhuizen <janneke@gnu.org>
Sun, 13 Mar 2011 15:38:06 +0000 (16:38 +0100)
committer Jan Nieuwenhuizen <janneke@gnu.org>
Sun, 13 Mar 2011 15:51:05 +0000 (16:51 +0100)
Files using instrument<->channel mapping [and thus obviously] combined
with voice<->track mapping (the new LilyPond default) are now handled
by midi2ly.

input/regression/midi/quantize-duration.ly
input/regression/midi/quantize-start.ly
input/regression/midi/staff-map-instrument.ly [new file with mode: 0644]
input/regression/midi/staff-map-voice.ly [new file with mode: 0644]
input/regression/midi/staff-map.ly [deleted file]
input/regression/midi/voice-2.ly
input/regression/midi/voice-4.ly
scripts/midi2ly.py

index 1994eeb54780ca53e2d7b1f8f6976b557ef7001a..5024b18a1e55e2c252c0b3184dc67d9b1acfb773 100644 (file)
@@ -49,7 +49,6 @@ trackB = <<
 
 \score {
   <<
-    %\set Score.midiChannelMapping = #'voice
     \context Staff=trackB \trackB
   >>
   \layout {}
index 089f5f1c78039d44c6d1956fe14f1b1e694b3e62..208855f93244001931760d8d510f16942a234d7d 100644 (file)
@@ -49,7 +49,6 @@ trackB = <<
 
 \score {
   <<
-    \set Score.midiChannelMapping = #'voice
     \context Staff=trackB \trackB
   >>
   \layout {}
diff --git a/input/regression/midi/staff-map-instrument.ly b/input/regression/midi/staff-map-instrument.ly
new file mode 100644 (file)
index 0000000..8d90c9d
--- /dev/null
@@ -0,0 +1,46 @@
+\version "2.13.53"
+
+\header {
+texidoc="Midi2ly remaps voices correctly to staves in MIDI-files that use instrument<->channel mapping when combined with voice<->track mapping.  TODO: pianostaff"
+options=""
+}
+
+\score {
+%% TODO:PIANOSTAFF  \context PianoStaff <<
+  <<
+    \context Staff = "treble" <<
+      %% the default
+      %% \set Score.midiChannelMapping = #'instrument
+      \context Voice="one" \relative c'' {
+       \time 4/4
+       \key c \minor
+       \voiceOne
+%comes
+%7
+       f8 es16 d c16 bes ! as g f8 as' g f 
+%8     es8 d es f b, c d b |
+       f,16 g as4 g16 f e2 |
+      }
+      \context Voice="two" \relative c'' {
+       \voiceTwo
+%dux
+%7
+       c4 r4 r8 f es d |
+%8     r8 as g f g f16 es f8 d | 
+       <b, d>8 r <b d> r <g c>2 |
+      }
+    >>
+    \context Staff = "bass" <<
+      \context Voice="three" \relative c' {
+       \key c \minor
+       \clef bass
+%7
+       r8 c16 b c8 g as c16 b c8 d |
+%8     g8 c16 b c8 d f,16 g as4 g16 f | 
+       <c,, c'>1
+      }
+    >>
+  >>
+  \layout {}
+  \midi {}
+}
diff --git a/input/regression/midi/staff-map-voice.ly b/input/regression/midi/staff-map-voice.ly
new file mode 100644 (file)
index 0000000..ea45836
--- /dev/null
@@ -0,0 +1,45 @@
+\version "2.13.53"
+
+\header {
+texidoc="Midi2ly remaps voices correctly to staves in MIDI-files that use voice<->channel mapping when combined with staff<->track mapping.  TODO: pianostaff"
+options=""
+}
+
+\score {
+%% TODO:PIANOSTAFF  \context PianoStaff <<
+  <<
+    \context Staff = "treble" <<
+      \set Score.midiChannelMapping = #'voice
+      \context Voice="one" \relative c'' {
+       \time 4/4
+       \key c \minor
+       \voiceOne
+%comes
+%7
+       f8 es16 d c16 bes ! as g f8 as' g f 
+%8     es8 d es f b, c d b |
+       f,16 g as4 g16 f e2 |
+      }
+      \context Voice="two" \relative c'' {
+       \voiceTwo
+%dux
+%7
+       c4 r4 r8 f es d |
+%8     r8 as g f g f16 es f8 d | 
+       <b, d>8 r <b d> r <g c>2 |
+      }
+    >>
+    \context Staff = "bass" <<
+      \context Voice="three" \relative c' {
+       \key c \minor
+       \clef bass
+%7
+       r8 c16 b c8 g as c16 b c8 d |
+%8     g8 c16 b c8 d f,16 g as4 g16 f | 
+       <c,, c'>1
+      }
+    >>
+  >>
+  \layout {}
+  \midi {}
+}
diff --git a/input/regression/midi/staff-map.ly b/input/regression/midi/staff-map.ly
deleted file mode 100644 (file)
index a159f43..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-\version "2.13.53"
-
-\header {
-texidoc="Midi2ly remaps voices are corectly to staves.  TODO: pianostaff"
-options=""
-}
-
-\score {
-%% TODO:PIANOSTAFF  \context PianoStaff <<
-  <<
-    \context Staff = "treble" <<
-      \set Score.midiChannelMapping = #'voice
-      \context Voice="one" \relative c'' {
-       \time 4/4
-       \key c \minor
-       \voiceOne
-%comes
-%7
-       f8 es16 d c16 bes ! as g f8 as' g f 
-%8     es8 d es f b, c d b |
-       f,16 g as4 g16 f e2 |
-      }
-      \context Voice="two" \relative c'' {
-       \voiceTwo
-%dux
-%7
-       c4 r4 r8 f es d |
-%8     r8 as g f g f16 es f8 d | 
-       <b, d>8 r <b d> r <g c>2 |
-      }
-    >>
-    \context Staff = "bass" <<
-      \context Voice="three" \relative c' {
-       \key c \minor
-       \clef bass
-%7
-       r8 c16 b c8 g as c16 b c8 d |
-%8     g8 c16 b c8 d f,16 g as4 g16 f | 
-       <c,, c'>1
-      }
-    >>
-  >>
-  \layout {}
-  \midi {}
-}
index e12345e56e71448bf44595d207474d062bd2a034..8dffe97a35ec990fc72d6daa72a143ca21aceecf 100644 (file)
@@ -61,7 +61,6 @@ trackB = <<
 
 \score {
   <<
-    \set Score.midiChannelMapping = #'voice
     \context Staff=trackB \trackA
     \context Staff=trackB \trackB
   >>
index 23d87b761319551238cf312e91529c0ebb5bc484..cb34c8496ca3f9d28b2b49df20dcaeaabcdfbaca 100644 (file)
@@ -77,7 +77,6 @@ trackB = <<
 
 \score {
   <<
-    \set Score.midiChannelMapping = #'voice
     \context Staff=trackB \trackA
     \context Staff=trackB \trackB
   >>
index 9fb538b5bf31c4a33fbc0fe0902e0823be7fd562..9a6a0bb9e2490cef55bb0e0e7bb37e4859f06f20 100644 (file)
@@ -414,45 +414,144 @@ class Text:
     def __repr__ (self):
         return 'Text(%d=%s)' % (self.type, self.text)
 
-def get_voice (channel, events):
+def get_voice (channel, music):
     debug ('channel: ' + str (channel) + '\n')
-    music = parse_events (events)
     return unthread_notes (music)
 
 class Channel:
     def __init__ (self, number):
         self.number = number
         self.events = []
+        self.music = None
     def add (self, event):
         self.events.append (event)
-        return self
     def get_voice (self):
-        return get_voice (self.number, self.events)
+        if not self.music:
+            self.music = self.parse ()
+        return get_voice (self.number, self.music)
+    def parse (self):
+        pitches = {}
+        notes = []
+        music = []
+        last_lyric = 0
+        last_time = 0
+        for e in self.events:
+            t = e[0]
+
+            if start_quant_clocks:
+                t = quantise_clocks (t, start_quant_clocks)
+
+            if (e[1][0] == midi.NOTE_OFF
+                or (e[1][0] == midi.NOTE_ON and e[1][2] == 0)):
+                debug ('%d: NOTE OFF: %s' % (t, e[1][1]))
+                if not e[1][2]:
+                    debug ('   ...treated as OFF')
+                end_note (pitches, notes, t, e[1][1])
+
+            elif e[1][0] == midi.NOTE_ON:
+                if not pitches.has_key (e[1][1]):
+                    debug ('%d: NOTE ON: %s' % (t, e[1][1]))
+                    pitches[e[1][1]] = (t, e[1][2])
+                else:
+                    debug ('...ignored')
+
+            # all include ALL_NOTES_OFF
+            elif (e[1][0] >= midi.ALL_SOUND_OFF
+              and e[1][0] <= midi.POLY_MODE_ON):
+                for i in pitches:
+                    end_note (pitches, notes, t, i)
+
+            elif e[1][0] == midi.META_EVENT:
+                if e[1][1] == midi.END_OF_TRACK:
+                    for i in pitches:
+                        end_note (pitches, notes, t, i)
+                    break
+
+                elif e[1][1] == midi.SET_TEMPO:
+                    (u0, u1, u2) = map (ord, e[1][2])
+                    us_per_4 = u2 + 256 * (u1 + 256 * u0)
+                    seconds_per_1 = us_per_4 * 4 / 1e6
+                    music.append ((t, Tempo (seconds_per_1)))
+                elif e[1][1] == midi.TIME_SIGNATURE:
+                    (num, dur, clocks4, count32) = map (ord, e[1][2])
+                    den = 2 ** dur
+                    music.append ((t, Time (num, den)))
+                elif e[1][1] == midi.KEY_SIGNATURE:
+                    (alterations, minor) = map (ord, e[1][2])
+                    sharps = 0
+                    flats = 0
+                    if alterations < 127:
+                        sharps = alterations
+                    else:
+                        flats = 256 - alterations
+
+                    k = Key (sharps, flats, minor)
+                    if not t and global_options.key:
+                        # At t == 0, a set --key overrides us
+                        k = global_options.key
+                    music.append ((t, k))
+
+                    # ugh, must set key while parsing
+                    # because Note init uses key
+                    # Better do Note.calc () at dump time?
+                    global_options.key = k
+
+                elif (e[1][1] == midi.LYRIC
+                      or (global_options.text_lyrics and e[1][1] == midi.TEXT_EVENT)):
+                    if last_lyric:
+                        last_lyric.clocks = t - last_time
+                        music.append ((last_time, last_lyric))
+                    last_time = t
+                    last_lyric = Text (midi.LYRIC, e[1][2])
+
+                elif (e[1][1] >= midi.SEQUENCE_NUMBER
+                      and e[1][1] <= midi.CUE_POINT):
+                    text = Text (e[1][1], e[1][2])
+                    music.append ((t, text))
+                    if (text.type == midi.SEQUENCE_TRACK_NAME):
+                        self.name = text.text
+                else:
+                    if global_options.verbose:
+                        sys.stderr.write ("SKIP: %s\n" % `e`)
+            else:
+                if global_options.verbose:
+                    sys.stderr.write ("SKIP: %s\n" % `e`)
+
+        if last_lyric:
+            # last_lyric.clocks = t - last_time
+            # hmm
+            last_lyric.clocks = clocks_per_4
+            music.append ((last_time, last_lyric))
+            last_lyric = 0
+
+        i = 0
+        while len (notes):
+            if i < len (music) and notes[0][0] >= music[i][0]:
+                i = i + 1
+            else:
+                music.insert (i, notes[0])
+                del notes[0]
+        return music
     
-class Track:
+class Track (Channel):
     def __init__ (self):
+        Channel.__init__ (self, None)
         self.name = None
-        self.events = []
         self.channels = {}
     def _add (self, event):
-        if isinstance (event, Text) and event.type == midi.SEQUENCE_TRACK_NAME:
-            self.name = event.text
         self.events.append (event)
     def add (self, event, channel=None):
         if channel == None:
             self._add (event)
         else:
-#            self.channels[channel] = self.channels.get (channel, Channel (channel)).add (event)
             self.channels[channel] = self.channels.get (channel, Channel (channel))
             self.channels[channel].add (event)
-    def get_voice (self):
-        return get_voice (None, self.events)
     def get_voices (self):
         return ([self.get_voice ()]
                 + [self.channels[k].get_voice ()
                    for k in sorted (self.channels.keys ())])
 
-def parse_track (events):
+def create_track (events):
     track = Track ()
     for e in events:
         data = list (e[1])
@@ -497,110 +596,6 @@ def end_note (pitches, notes, t, e):
     except KeyError:
         pass
 
-def parse_events (channel):
-    pitches = {}
-
-    notes = []
-    events = []
-    last_lyric = 0
-    last_time = 0
-    for e in channel:
-        t = e[0]
-
-        if start_quant_clocks:
-            t = quantise_clocks (t, start_quant_clocks)
-
-        if (e[1][0] == midi.NOTE_OFF
-            or (e[1][0] == midi.NOTE_ON and e[1][2] == 0)):
-            debug ('%d: NOTE OFF: %s' % (t, e[1][1]))
-            if not e[1][2]:
-                debug ('   ...treated as OFF')
-            end_note (pitches, notes, t, e[1][1])
-
-        elif e[1][0] == midi.NOTE_ON:
-            if not pitches.has_key (e[1][1]):
-                debug ('%d: NOTE ON: %s' % (t, e[1][1]))
-                pitches[e[1][1]] = (t, e[1][2])
-            else:
-                debug ('...ignored')
-
-        # all include ALL_NOTES_OFF
-        elif (e[1][0] >= midi.ALL_SOUND_OFF
-          and e[1][0] <= midi.POLY_MODE_ON):
-            for i in pitches:
-                end_note (pitches, notes, t, i)
-
-        elif e[1][0] == midi.META_EVENT:
-            if e[1][1] == midi.END_OF_TRACK:
-                for i in pitches:
-                    end_note (pitches, notes, t, i)
-                break
-
-            elif e[1][1] == midi.SET_TEMPO:
-                (u0, u1, u2) = map (ord, e[1][2])
-                us_per_4 = u2 + 256 * (u1 + 256 * u0)
-                seconds_per_1 = us_per_4 * 4 / 1e6
-                events.append ((t, Tempo (seconds_per_1)))
-            elif e[1][1] == midi.TIME_SIGNATURE:
-                (num, dur, clocks4, count32) = map (ord, e[1][2])
-                den = 2 ** dur
-                events.append ((t, Time (num, den)))
-            elif e[1][1] == midi.KEY_SIGNATURE:
-                (alterations, minor) = map (ord, e[1][2])
-                sharps = 0
-                flats = 0
-                if alterations < 127:
-                    sharps = alterations
-                else:
-                    flats = 256 - alterations
-
-                k = Key (sharps, flats, minor)
-                if not t and global_options.key:
-                    # At t == 0, a set --key overrides us
-                    k = global_options.key
-                events.append ((t, k))
-
-                # ugh, must set key while parsing
-                # because Note init uses key
-                # Better do Note.calc () at dump time?
-                global_options.key = k
-
-            elif (e[1][1] == midi.LYRIC
-                  or (global_options.text_lyrics and e[1][1] == midi.TEXT_EVENT)):
-                if last_lyric:
-                    last_lyric.clocks = t - last_time
-                    events.append ((last_time, last_lyric))
-                last_time = t
-                last_lyric = Text (midi.LYRIC, e[1][2])
-
-            elif (e[1][1] >= midi.SEQUENCE_NUMBER
-                  and e[1][1] <= midi.CUE_POINT):
-                events.append ((t, Text (e[1][1], e[1][2])))
-            else:
-                if global_options.verbose:
-                    sys.stderr.write ("SKIP: %s\n" % `e`)
-                pass
-        else:
-            if global_options.verbose:
-                sys.stderr.write ("SKIP: %s\n" % `e`)
-            pass
-
-    if last_lyric:
-        # last_lyric.clocks = t - last_time
-        # hmm
-        last_lyric.clocks = clocks_per_4
-        events.append ((last_time, last_lyric))
-        last_lyric = 0
-
-    i = 0
-    while len (notes):
-        if i < len (events) and notes[0][0] >= events[i][0]:
-            i = i + 1
-        else:
-            events.insert (i, notes[0])
-            del notes[0]
-    return events
-
 def unthread_notes (channel):
     threads = []
     while channel:
@@ -949,7 +944,17 @@ def convert_midi (in_file, out_file):
     if global_options.verbose:
         print 'allowed tuplet clocks:', allowed_tuplet_clocks
 
-    staves = [Staff (parse_track (t)) for t in midi_dump[1]]
+    tracks = [create_track (t) for t in midi_dump[1]]
+    prev = None
+    staves = []
+    for t in tracks:
+        voices = t.get_voices ()
+        if ((t.name and prev and prev.name)
+            and t.name.split (':')[0] == prev.name.split (':')[0]):
+            staves[-1].voices += voices
+        else:
+            staves.append (Staff (t))
+        prev = t
 
     tag = '%% Lily was here -- automatically converted by %s from %s' % ( program_name, in_file)