$(outdir)/%.css: %.css
ln -f $< $@
+
+### Translations maintenance targets
+
po-update:
make -C po po-update
cp fr/GNUmakefile $(ISOLANG)
cp fr/user/GNUmakefile $(ISOLANG)/user
sed -i -e 's/ISOLANG *= *fr/ISOLANG = $(ISOLANG)/' $(ISOLANG)/GNUmakefile $(ISOLANG)/user/GNUmakefile
- $(PYTHON) $(buildscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) -o doc.pot --skeleton --gettext ../user/lilypond-learning.tely
+ $(auxscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) -o doc.pot --skeleton --gettext ../user/lilypond-learning.tely
mv $(outdir)/*.*tely $(ISOLANG)/user
msgmerge -U po/lilypond-doc.pot $(outdir)/doc.pot
cp po/lilypond-doc.pot po/$(ISOLANG).po
TELY_FILES = $(call src-wildcard,$(ISOLANG)/user/*.tely)
skeleton-update:
- $(PYTHON) $(buildscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) --skeleton $(TELY_FILES:$(ISOLANG)/user/%.tely=../user/%.tely)
- $(PYTHON) $(buildscript-dir)/texi-skeleton-update.py $(ISOLANG)/user $(outdir)
+ $(auxscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) --skeleton $(TELY_FILES:$(ISOLANG)/user/%.tely=../user/%.tely)
+ $(auxscript-dir)/texi-skeleton-update.py $(ISOLANG)/user $(outdir)
snippet-update:
- $(PYTHON) $(buildscript-dir)/update-snippets.py user $(ISOLANG)/user '*.itely'
+ $(auxscript-dir)/update-snippets.py user $(ISOLANG)/user '*.itely'
DOCUMENTS_INCLUDES:=-I $(ISOLANG)/user \
-I $(top-build-dir)/Documentation/$(ISOLANG)/user/out-www \
endif # ISOLANG
check-xrefs:
- $(PYTHON) $(buildscript-dir)/check_texi_refs.py --batch \
- $(DOCUMENTS_INCLUDES) $(buildscript-dir)/manuals_definitions.py
+ $(auxscript-dir)/check_texi_refs.py --batch \
+ $(DOCUMENTS_INCLUDES) $(auxpython-dir)/manuals_definitions.py
fix-xrefs:
- $(PYTHON) $(buildscript-dir)/check_texi_refs.py --auto-fix \
- $(DOCUMENTS_INCLUDES) $(buildscript-dir)/manuals_definitions.py
+ $(auxscript-dir)/check_texi_refs.py --auto-fix \
+ $(DOCUMENTS_INCLUDES) $(auxpython-dir)/manuals_definitions.py
check-translation:
- $(PYTHON) $(buildscript-dir)/check_translation.py $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES)
+ $(auxscript-dir)/check_translation.py $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES)
update-translation:
- $(PYTHON) $(buildscript-dir)/check_translation.py --update $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES)
+ $(auxscript-dir)/check_translation.py --update $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES)
translation-status:
make -C po out=www messages
- $(PYTHON) $(buildscript-dir)/translations-status.py
+ $(auxscript-dir)/translations-status.py
local-help: extra-local-help
+++ /dev/null
-# -*-python-*-
-
-Import ('env')
-env.AT_COPY ('index.html.in')
-
where <MY-LANGUAGE> is the ISO 639 language code.
Add a language definition for your language in
-buildscripts/langdefs.py.
+python/langdefs.py.
See next section about what files to translate and the following
detailed instructions after the next section.
TECHNICAL BACKGROUND
A number of Python scripts handle a part of the documentation
-translation process. All are located in buildscripts/, except
-langdefs.py which is in python/
+translation process.
+All scripts used to maintain the translations
+are located in scripts/aux/:
-* buildlib.py -- module containing common functions (read piped output
-of a shell command, use Git)
-* langdefs.py -- language definitions module
* check_translation.py -- show diff to update a translation
* texi-langutils.py -- quickly and dirtily parse Texinfo files to
make message catalogs and Texinfo skeleton files
* texi-skeleton-update.py -- update Texinfo skeleton files
+* update-snippets.py -- synchronize ly snippets with those from
+English docs
+* translations-status.py -- update translations status pages and word
+counts in the file you are reading.
+* tely-gettext.py -- gettext node names, section titles and references
+in the sources; WARNING only use this script when support for
+"makeinfo --html" has been dropped.
+
+Other scripts are used in the build process, in scripts/build/:
* html-gettext.py -- translate node names, section titles and cross
references in HTML files generated by makeinfo
-* add_html_footer.py (module imported by www_post.py) -- add footer and
-tweak links in HTML pages
* texi-gettext.py -- gettext node names, section titles and references
before calling texi2pdf
* mass-link.py -- link or symlink files between English documentation
and documentation in other languages
-* update-snippets.py -- synchronize ly snippets with those from
-English docs
-* translations-status.py -- update translations status pages and word
-counts in the file you are reading.
+
+Python modules used by scripts in scripts/aux/ or scripts/build/ (but
+not by installed Python scripts) are located in python/aux/:
+* manuals_definitions.py -- define manual names and name of
+cross-reference Texinfo macros
+* buildlib.py -- common functions (read piped output
+of a shell command, use Git)
+* postprocess_html.py (module imported by www_post.py) -- add footer and
+tweak links in HTML pages
+
+And finally
+* python/langdefs.py -- language definitions module
ln -f $< $@
$(outdir)/%.html: %.bib
- BSTINPUTS=$(src-dir) $(PYTHON) $(buildscript-dir)/bib2html.py -o $@ $<
+ BSTINPUTS=$(src-dir) $(buildscript-dir)/bib2html -o $@ $<
local-clean:
rm -f fonts.aux fonts.log feta*.tfm feta*.*pk
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'src_glob')
-bib = src_glob ('*.bib')
-env.AT_COPY ('index.html.in')
-
-# todo: must make html-long.bst as source too.
-# make as source?
-
-map (env.BIB2HTML, bib)
-env.Alias ('doc', bib)
-
<es'-2>4
@end lilypond
+@snippets
+
+@lilypondfile[verbatim,lilyquote,texidoc,doctitle]
+{avoiding-collisions-of-chord-fingering-with-beams.ly}
+
@seealso
Référence du programme : @rinternals{Fingering}.
ifeq ($(PLATFORM_WINDOWS),yes)
$(outdir)/%.ico: %.xpm
- $(PYTHON) $(buildscript-dir)/genicon.py $< $@
+ $(buildscript-dir)/genicon $< $@
default: $(lilypond-icon) $(ly-icon)
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'src_glob', 'install')
-
-tely = src_glob ('*.tely')
-texi = src_glob ('*.texi') + map (env.TEXI, tely)
-
-txt = map (env.TXT, texi)
-html = map (env.HTML, texi)
-
-env.Alias ('doc', txt)
-env.Alias ('doc', html)
-
-install (txt, env['sharedir_doc_package'])
+++ /dev/null
-# -*-python-*-
-
-import os
-import string
-
-Import ('env', 'base_glob', 'src_glob')
-tely = base_glob ('*.tely')
-png = src_glob ('*.png') + map (env.EPS2PNG, base_glob ('*.eps'))
-
-# We need lily and mf to build these.
-env.Depends ('lilypond.texi', ['#/lily', '#/mf', '#/python'])
-env.Depends ('music-glossary.texi', ['#/lily', '#/mf', '#/python'])
-
-env.Depends ('lilypond.texi', 'lilypond-internals.texi')
-
-eps = src_glob ('*.eps') + map (env.PNG2EPS, base_glob ('*.png'))
-env.Depends ('lilypond.texi', eps + png)
-
-lilypond_book_flags = '''--format=$LILYPOND_BOOK_FORMAT --process="lilypond -I$srcdir/input/manual/ $__verbose --backend=eps --formats=ps,png --header=texidoc -dcheck-internal-types -ddump-signatures -danti-alias-factor=2 -dgs-load-fonts" '''
-e = env.Copy (
-# LILYPOND_BOOK_FLAGS = '''--process="lilypond --backend=eps --formats=ps,png --header=texidoc -I#/input/manual -e '(ly:set-option (quote internal-type-checking) \#t)'"''',
- LILYPOND_BOOK_FLAGS = lilypond_book_flags,
- __verbose = ' --verbose',
- GENERATE_DOCUMENTATION = '$srcdir/ly/generate-documentation',
- ## TEXI2DVI_FLAGS = ['-I#Documentation/user'],
- )
-
-e.Command ('lilypond-internals.texi', ['#/lily', '#/mf', '#/python'],
- 'cd ${TARGET.dir} && $LILYPOND $GENERATE_DOCUMENTATION')
-
-## FIXME: implicit steps from [TE]LY -> PDF
-texi = map (env.TEXI, tely)
-dvi = map (env.TEXIDVI, tely)
-ps = map (env.DVIPS, tely) ###map (lambda x: x + '.dvi', tely))
-dvipdf = map (env.DVIPDF, tely)
-pdf = map (env.PSPDF, dvipdf)
-
-# FIXME: install
-info = map (env.INFO, tely)
-
-def file_subst (file_name, find, subst):
- s = open (file_name).read ()
- t = string.replace (s, find, subst)
- if s != t:
- os.rename (file_name, file_name + '~')
- h = open (file_name, "w")
- h.write (t)
- h.close ()
-
-e['usersrc'] = Dir ('.').srcnode ().abspath
-e['userout'] = Dir ('.').abspath
-
-a = ['$MAKEINFO -I$usersrc -I${SOURCE.dir} --html \
- --css-include=$srcdir/Documentation/texinfo.css $__verbose \
- --output=${TARGET.dir} $SOURCE',
- 'ln -f ${SOURCE.dir}/*.png ${SOURCE.dir}/*.ly ${TARGET.dir}/',]
-
-e.Command ('lilypond/index.html', 'lilypond.texi', a)
-e.Command ('lilypond-internals/index.html', 'lilypond-internals.texi', a)
-e.Command ('music-glossary/index.html', 'music-glossary.texi', a)
-
-a = ['$MAKEINFO -I$usersrc -I${SOURCE.dir} --html \
- --no-split --no-headers \
- --css-include=$srcdir/Documentation/texinfo.css $__verbose \
- --output=$TARGET $SOURCE']
-
-e.Command ('lilypond.html', 'lilypond.texi', a)
-e.Command ('lilypond-internals.html', 'lilypond-internals.texi', a)
-e.Command ('music-glossary.html', 'music-glossary.texi', a)
-
-#Hmm -- why not just mv ./Documentation/{*,*/*} ./doc :-)
-env.Alias ('doc', texi)
-env.Alias ('doc', dvi)
-env.Alias ('doc', ps)
-env.Alias ('doc', pdf)
-
-env.Alias ('doc', 'lilypond/index.html')
-env.Alias ('doc', 'lilypond-internals/index.html')
-env.Alias ('doc', 'lilypond.html')
-env.Alias ('doc', 'lilypond-internals.html')
-
-# install ('lilypond/*', env['sharedir_doc_package'] + '/html')
-# install ('lilypond-user/*', env['sharedir_doc_package'] + '/html')
shall see later that text can actually be specified in a much more
general way by using the very powerful @code{markup} command.
-@unnumberedsubsubsec Setting context properties with @code{\with}
+@subsubheading Setting context properties with @code{\with}
@funindex \with
@funindex with
this new default value may be restored with the
@code{\unset fontSize} command.
-@unnumberedsubsubsec Setting context properties with @code{\context}
+@subsubheading Setting context properties with @code{\context}
@cindex context properties, setting with \context
@funindex \context
For checking the coverage of the test suite, do the following
@example
-./buildscripts/build-coverage.sh
+./scripts/aux/build-coverage.sh
@emph{# uncovered files, least covered first}
-python ./buildscripts/coverage.py --summary out-cov/*.cc
+./scripts/aux/coverage.py --summary out-cov/*.cc
@emph{# consecutive uncovered lines, longest first}
-python ./buildscripts/coverage.py --uncovered out-cov/*.cc
+./scripts/aux/coverage.py --uncovered out-cov/*.cc
@end example
@c Fixes by Jean-Pierre Coulon and `Dirk', alphabetized by last name, KK, 10/07
@c Updates to the German translation by Till Rettig, 12/07
-Copyright @copyright{} 1999--2008 by the authors
+Copyright @copyright{} 1999--2009 by the authors
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.1
@end ignore
@*
-Copyright 1999--2008 by the authors
+Copyright 1999--2009 by the authors
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.1
* accidental::
* adagio::
* al niente::
+* alla breve::
* allegro::
* alteration::
* alto::
* crescendo::
* cue-notes::
* custos::
+* cut time::
* D::
* da capo::
* dal niente::
* lyrics::
* major::
* major interval::
+* maxima::
* meantone temperament::
* measure::
* measure repeat::
* melisma line::
* melodic cadence::
* mensural notation::
+* mensuration sign::
* meter::
* metronome::
* metronome mark::
* scordatura::
* score::
* second::
+* semibreve::
* semitone::
* seventh::
* sextolet::
@item FI: A, a
@end itemize
-
@seealso
@ref{Pitch names}.
S: ?,
FI: kahdelle.
-Abbreviated @notation{a2} or @notation{a 2}.
+Abbreviated @notation{a2} or @notation{a 2}. In orchestral scores, @notation{a
+due} indicates that:
@enumerate
-@item An indication in orchestral scores that a single part notated on a single
-staff that normally carries parts for two players (e.g. first and second oboes)
-is to be played by both players.
+@item A single part notated on a single staff that normally carries parts for
+two players (e.g. first and second oboes) is to be played by both players.
@item Or conversely, that two pitches or parts notated on a staff that normally
carries a single part (e.g. first violin) are to be played by different players,
@end enumerate
-
@seealso
-None yet.
+No cross-references.
@node accelerando
S: accelerando,
FI: accelerando, kiihdyttäen.
-[Italian: @q{speed up, accelerate}.]
+[Italian: @q{speed up, accelerate}]
Increase tempo
-
@seealso
-None yet.
+No cross-references.
@node accent
The stress of one tone over others.
-
@seealso
-None yet.
+No cross-references.
@node accessory
@node acciaccatura
@section acciaccatura
+ES: ?,
+I: acciaccatura,
+F: ?,
+D: ?,
+NL: ?,
+DK: ?,
+S: ?,
+FI: ?.
+
A grace note which takes its time from the rest or note preceding the
principal note to which it is attached. The acciaccatura is drawn as a
small eighth note (quaver) with a line drawn through the flag and
stem.
-
@seealso
@ref{appoggiatura}, @ref{grace notes}, @ref{ornament}.
@item Raising its pitch:
@itemize
-@item A @notation{double sharp}, by two semitones (a whole tone)
-@item A @notation{sharp}, by one semitone
+@item By two semitones (a whole tone)—@notation{double sharp}
+@item By one semitone—@notation{sharp}
@end itemize
@item Lowering its pitch:
@itemize
-@item A @notation{flat}, by one semitone
-@item A @notation{double flat}, by two semitones (a whole tone)
+@item By one semitone—@notation{flat}
+@item By two semitones (a whole tone)—@notation{double flat}
@end itemize
-@item Canceling the effects of the key signature or previous accidentals.
+@item Or canceling the effects of the key signature or previous accidentals.
@end itemize
@lilypond[quote,notime]
}
@end lilypond
-
@seealso
@ref{alteration}, @ref{semitone}, @ref{whole tone}.
S: adagio,
FI: adagio, hitaasti.
-[Italian: @q{comfortable, easy}.]
+[Italian: @q{comfortable, easy}]
@itemize
@end itemize
-
@seealso
@ref{andante}, @ref{largo}, @ref{sonata}.
S: ?,
FI: häviten olemattomiin.
-[Italian: @q{to nothing}.] Used with @notation{decrescendo} to indicate
+[Italian: @q{to nothing}] Used with @notation{decrescendo} to indicate
that the sound should fade away to nothing.
@notation{Al niente} is indicated by circling the tip of the hairpin:
@notation{al niente} with @notation{crescendo}. Instead, one should use
@emph{dal niente} (@notation{@b{from} nothing}).
+@seealso
+@ref{crescendo}, @ref{dal niente}, @ref{decrescendo}, @ref{hairpin}.
+
+
+@node alla breve
+@section alla breve
+
+ES: ?,
+I: ?,
+F: alla breve,
+D: ?,
+NL: ?,
+DK: ?,
+S: ?,
+FI: ?.
+
+[Italian: @q{on the breve}] Twice as fast as the notation indicates.
+
+Also called @notation{in cut-time}. The name derives from mensural notation,
+where the @notation{tactus} (or beat) is counted on the semibreve (the modern
+whole note). Counting @q{on the breve} shifts the tactus to the next longest
+note value, which (in modern usage) effectively halves all note values.
+
+(In mensural notation, breves and semibreves can have a ternary relationship, in
+which case @notation{alla breve} means thrice (not twice) as fast. In practice,
+this complication may not have mattered, since Gaffurius's system of multiplex
+proportions makes it easy to explicitly state which proportion is needed.)
@seealso
-@ref{crescendo}, @ref{decrescendo}, @ref{hairpin}.
+@ref{breve}, @ref{hemiola}, @ref{mensural notation}, @ref{note value},
+@ref{proportion}, @ref{whole note}.
@node allegro
S: allegro,
FI: allegro, nopeasti.
-[Italian: @q{cheerful}.] Quick tempo. Also used as a title for pieces in a quick
-tempo, especially the first and last movements of a sonata.
-
+[Italian: @q{cheerful}] Quick tempo. Also used as a title for pieces in a
+quick tempo, especially the first and last movements of a sonata.
@seealso
@ref{sonata}.
An alteration is the modification, raising or lowering, of a note's
pitch. It is established by an accidental.
+@c TODO: add second meaning from mensural notation
@seealso
@ref{accidental}.
reached the height of the female voice. This type of voice is also
known as countertenor.
-
@seealso
@ref{countertenor}.
C clef setting middle C on the middle line of the staff.
-
@seealso
@ref{C clef}.
also denote the pitch range that a musical instrument is capable of playing.
Sometimes anglicized to @emph{ambit} (pl. @emph{ambits}).
-
@seealso
-None yet.
+No cross-references.
@node anacrusis
FI: kohotahti.
An anacrusis (also known as pickup or upbeat) is an incomplete measure
-of music before a section of music. It also refers to the initial
+of music before a section of music. It also refers to the initial
note(s) of a melody occurring in that incomplete measure.
@lilypond[quote,relative=1]
f,2. \bar "||"
@end lilypond
-
@seealso
@ref{measure}, @ref{meter}.
S: andante,
FI: andante, käyden.
-[Italian: present participle of @emph{andare}, @q{to walk}.]
+[Italian: present participle of @emph{andare}, @q{to walk}]
Walking tempo/character.
-
@seealso
-None yet.
+No cross-references.
@node appoggiatura
>>
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node arpeggio
S: arpeggio,
FI: arpeggio, murtosointu.
-[Italian: @q{harp-like, played like a harp}.]
+[Italian: @q{harp-like, played like a harp}]
@lilypond[quote,line-width=13\cm]
\new PianoStaff <<
>>
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node articulation
should be played. Slurs, accents, staccato, and legato are all
examples of articulation.
-
@seealso
-None yet.
+No cross-references.
@node ascending interval
A distance between a starting lower note and a higher ending note.
-
@seealso
-None yet.
+No cross-references.
@node augmented interval
S: överstigande intervall,
FI: ylinouseva intervalli.
-
@seealso
@ref{interval}.
This is a placeholder for augmentation (wrt mensural notation).
-
@seealso
@ref{diminution}, @ref{mensural notation}.
@end itemize
-
@seealso
-None yet.
+No cross-references.
@node B
@item FI: H, h
@end itemize
-
@seealso
@ref{H}, @ref{Pitch names}
@node backfall
@section backfall
-
@seealso
@ref{appoggiatura}.
@node bar
@section bar
-
@seealso
@ref{measure}.
in secular music, or in sacred music to indicate congruences between parts
in otherwise-unmetered music).
-
@seealso
@ref{measure}.
@c F: clef de troisième ligne dropped
-
@seealso
@ref{bass}, @ref{tenor}.
C or F clef setting middle C on the upper staff line.
-
@seealso
@ref{C clef}, @ref{F clef}.
@end itemize
-
@seealso
@ref{strings}.
A clef setting with middle C on the first top ledger line.
-
@seealso
@ref{F clef}.
g64-"1/64"[ s32 g64 s32 g64 s32 g64] s32
@end lilypond
-
@seealso
@ref{feathered beam}.
g8 d' c | b c a | g4. \bar "||"
@end lilypond
-
@seealso
@ref{time signature}.
@node beat repeat
@section beat repeat
-
@seealso
@ref{percent repeat}.
@node bind
@section bind
-
@seealso
@ref{tie}.
>>
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node bracket
S: ?,
FI: sulkumerkki.
-
@seealso
@ref{brace}
formed mouthpiece. The brass instruments commonly used in a symphony
orchestra are trumpet, trombone, french horn, and tuba.
-
@seealso
-None yet.
+No cross-references.
@node breath mark
Indication of where to breathe in vocal and wind instrument parts.
-
@seealso
@ref{caesura}.
@section breve
@itemize
-@item US: breve, double-whole note,
-@item ES: cuadrada, breve,
-@item I: breve,
-@item F: brève,
-@item D: Brevis,
-@item NL: brevis,
-@item DK: brevis,
-@item S: brevis,
-@item FI: brevis, kaksoiskokonuotti.
+@item US: breve, double-whole note
+@item ES: cuadrada, breve
+@item I: breve
+@item F: brève
+@item D: Brevis
+@item NL: brevis
+@item DK: brevis
+@item S: brevis
+@item FI: brevis, kaksoiskokonuotti
@end itemize
-Note value twice as long as a whole note. Mainly used in pre-1650 music.
-The shortest note value generally used in white mensural notation, hence the
-name, which originally meant @q{of short duration}.
+Note value: twice the length of a @notation{whole note} (@notation{semibreve}).
+
+Mainly used in music from before 1650. In mensural notation, it was a note
+of fairly short duration—hence the name, which is Latin for @q{short} or
+@q{of short duration}.
@lilypond[quote,notime,relative=2]
g\breve
@end lilypond
-
@seealso
-@ref{mensural notation}, @ref{note value}.
+@ref{mensural notation}, @ref{note value}, @ref{semibreve}.
@node C
@item FI: C, c
@end itemize
-
@seealso
@ref{Pitch names}.
}
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node cadence
S: kadens,
FI: kadenssi, lopuke.
-
@seealso
@ref{harmonic cadence}, @ref{functional harmony}.
ability to improvise. Since the middle of the 19th century, however,
most cadenzas have been written down by the composer.
-
@seealso
-None yet.
+No cross-references.
@node caesura
S: ?,
FI: välimerkki.
-[Latin: from the supine of @emph{caedere} @q{to cut down}.]
+[Latin: from the supine of @emph{caedere} @q{to cut down}]
The break between two musical phrases, sometimes (but not always) marked by a
rest or a breath mark.
-
@seealso
@ref{breath mark}.
S: kanon,
FI: kaanon, tarkka jäljittely.
-
@seealso
@ref{counterpoint}.
Logarithmic unit of measurement. 1@tie{}cent is 1/1200 of an octave
(1/100 of an equally tempered semitone).
-
@seealso
@ref{equal temperament}, @ref{semitone}.
@node central C
@section central C
-
@seealso
@ref{middle C}.
FI: sointu.
Three or more tones sounding simultaneously. In traditional European music
-the base chord is a @emph{triad} consisting of two thirds. @emph{Major}
+the base chord is a @emph{triad} consisting of two thirds. @emph{Major}
(major + minor third) as well as @emph{minor} (minor + major third) chords
-may be extended with more thirds. Four-tone @emph{seventh chords} and
+may be extended with more thirds. Four-tone @emph{seventh chords} and
five-tone @emph{ninth} major chords are most often used as dominants
(functional harmony). Chords having no third above the lower notes to
define their mood are a special case called @q{open chords}. The lack of
>>
@end lilypond
-
@seealso
@ref{functional harmony}, @ref{interval}, @ref{inversion}, @ref{quality},
@ref{third}.
c1 cis d dis e f fis g gis a ais b c
@end lilypond
-
@seealso
@ref{semitone}.
Using tones extraneous to a diatonic scale (minor, major).
-
@seealso
@ref{diatonic scale}.
S: kyrkotonart,
FI: moodi, kirkkosävellaji.
-
@seealso
@ref{diatonic scale}.
\musicglyph #"clefs.F"
\strut
\musicglyph #"clefs.C"
- }
+ }
}
@end lilypond
}
@end lilypond
-
@seealso
@ref{C clef}, @ref{F clef}, @ref{G clef}.
\makeClusters { <c e>4 <b f'> <b g'> <c g>8 <f e> }
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node comma
Difference in pitch between a note derived from pure tuning and the
same note derived from some other tuning method.
-
@seealso
@ref{didymic comma}, @ref{Pythagorean comma}, @ref{syntonic comma},
@ref{temperament}.
4/4 time. The symbol, which resembles a capital letter C, comes from
mensural notation.
-
@seealso
@ref{mensural notation}, @ref{meter}.
S: komplementärintervall (?),
FI: täydentävä intervalli.
-
@seealso
@ref{inverted interval}.
Intervals larger than an octave.
-
@seealso
@ref{interval}.
A meter that includes a triplet subdivision within the beat, such as
6/8, 9/8, 12/8.
-
@seealso
@ref{meter}, @ref{simple meter}.
}
@end lilypond
-
@seealso
@ref{compound meter}, @ref{meter}, @ref{polymetric time signature}.
@end itemize
-
@seealso
@ref{transposing instrument}.
g4 g g a | b2 a | g4 b a a | g1 \bar "||"
@end lilypond
-
@seealso
@ref{disjunct movement}.
S: konsonans,
FI: konsonanssi, sopusointi.
-
@seealso
@ref{harmony}.
S: alt,
FI: kontra-altto.
-
@seealso
@ref{alto}.
@c Copying music required more skill than engraving. Flagged for NPOV
-
@seealso
-None yet.
+No cross-references.
@node counterpoint
>>
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node countertenor
S: kontratenor, counter tenor,
FI: kontratenori.
-
@seealso
@ref{contralto}.
g4\< a b c | d1\! \bar "|."
@end lilypond
-
@seealso
@ref{decrescendo}, @ref{hairpin}.
Notes belonging to another part, included in a separate part with the
purpose of hinting when to start playing. Usually printed in a smaller type.
-
@seealso
-None yet.
+No cross-references.
@node custos
}
@end lilypond
+@seealso
+No cross-references.
+
+
+@node cut time
+@section cut time
@seealso
-None yet.
+@ref{alla breve}.
@node D
@item FI: D, d
@end itemize
-
@seealso
@ref{Pitch names}
Abbreviated @notation{D.C.}. Indicates that the piece is to be repeated from
the beginning to the end or to a certain place marked @emph{fine}.
-
@seealso
-None yet.
+No cross-references.
@node dal niente
S: ?,
FI: tyhjästä ilmaantuen.
-[Italian: @q{from nothing}.] Used with @notation{crescendo} to indicate
+[Italian: @q{from nothing}] Used with @notation{crescendo} to indicate
that the sound should gradually increase from nothing.
-
@seealso
@ref{al niente}.
\bar "|."
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node decrescendo
d4\> c b a | g1 \! \bar "|."
@end lilypond
-
@seealso
@ref{crescendo}, @ref{diminuendo}, @ref{hairpin}.
A distance between a starting higher note and a lower ending note.
-
@seealso
-None yet.
+No cross-references.
@node diatonic scale
S: diatonisk skala,
FI: diatoninen asteikko.
-A scale consisting of 5@w{ }whole tones and 2@w{ }semitones (S). Scales
+A scale consisting of 5@w{ }whole tones and 2@w{ }semitones (S). Scales
played on the white keys of a piano keyboard are diatonic. These scales
are sometimes called, somewhat inaccurately, @q{church modes}.
c1 d
e^"~~ S" f g a b^"~~ S" c
}
- \lyrics {
+ \lyrics {
Major
}
>>
a1
b^"~~ S" c d e^"~~ S" f g a
}
- \lyrics {
+ \lyrics {
"Ancient (or Natural) minor"
}
>>
a1
b^"~~ S" c d e fis gis^"~~ S" a
}
- \lyrics {
+ \lyrics {
"Melodic minor ascending"
}
>>
a1
g! f!^"~~ S" e d c^"~~ S" b a
}
- \lyrics {
+ \lyrics {
"Melodic minor descending"
}
>>
@end lilypond
-
@seealso
@ref{semitone}, @ref{whole tone}.
@node didymic comma
@section didymic comma
-
@seealso
@ref{syntonic comma}.
S: förminskat intervall,
FI: vähennetty intervalli.
-
@seealso
@ref{interval}.
S: diminuendo,
FI: diminuendo, hiljentyen.
-
@seealso
@ref{decrescendo}.
This is a stub for diminution (@emph{wrt} mensural notation).
-
@seealso
@ref{augmentation}, @ref{mensural notation}.
S: ?,
FI: suora.
-
@seealso
@ref{custos}.
@lilypond[quote,relative=1]
\key a \major
\time 4/4
-\partial 8 e8 |
-a4. gis8 b a e cis |
-fis2 d4.
-\bar "||"
+ \partial 8 e8 |
+ a4. gis8 b a e cis |
+ fis2 d4.
+ \bar "||" }
@end lilypond
-
@seealso
@ref{conjunct movement}.
Another name for @ref{dissonant interval}.
-
@seealso
@ref{dissonant interval}, @ref{harmony}.
S: dissonans,
FI: dissonanssi, dissonoiva intervalli, riitasointi.
-
@seealso
@ref{harmony}.
TODO: musical example here?
-
@seealso
-None yet.
+No cross-references.
@node doit
Indicator for an indeterminately rising pitch bend. Compare with
@emph{glissando}, which has determinate starting and ending pitches.
-
@seealso
@ref{fall}, @ref{glissando}.
The fifth @emph{scale degree} in @emph{functional harmony}.
-
@seealso
@ref{functional harmony}, @ref{scale degree}.
S: dominantnonackord,
FI: dominanttinoonisointu.
-
@seealso
@ref{chord}, @ref{functional harmony}.
S: dominantseptimackord,
FI: dominanttiseptimisointu.
-
@seealso
@ref{chord}, @ref{functional harmony}.
S: dorisk tonart,
FI: doorinen moodi.
-
@seealso
@ref{diatonic scale}.
S: punkt,
FI: piste.
-
@seealso
@ref{dotted note}, @ref{note value}.
S: punkterad not,
FI: pisteellinen nuotti.
-
@seealso
@ref{note value}.
S: dubbelslag,
FI: kaksoisappogiatura, kaksoisetuhele.
-
@seealso
@ref{appoggiatura}.
Indicates the end of a section within a movement.
-
@seealso
-None yet.
+No cross-references.
@node double dotted note
S: dub@-bel@-punk@-te@-rad not,
FI: kaksoispisteellinen nuotti.
-
@seealso
@ref{note value}.
S: dubbelbe,
FI: kaksoisalennusmerkki.
-
@seealso
@ref{accidental}.
S: dubbelkors,
FI: kaksoisylennysmerkki.
-
@seealso
@ref{accidental}.
S: ?,
FI: kaksois-aika-arvomerkintä.
-
@seealso
@ref{polymetric time signature}.
A simultaneous trill on two notes, usually at the distance of a third.
-
@seealso
-None yet.
+No cross-references.
@node duple meter
S: tvåtakt,
FI: kaksoistempo.
-
@seealso
@ref{meter}.
S: duol,
FI: duoli.
-
@seealso
@ref{note value}.
S: tonlängd,
FI: kesto, aika-arvo.
-
@seealso
@ref{note value}.
one degree to another. The terms, abbreviations, and symbols used to
indicate this information are called dynamic marks.
-
@seealso
@ref{piano}, @ref{forte}, @ref{crescendo}, @ref{decrescendo},
@ref{diminuendo}.
@item FI: E, e
@end itemize
-
@seealso
@ref{Pitch names}
@node ecclesiastical mode
@section ecclesiastical mode
-
@seealso
@ref{church mode}, @ref{diatonic scale}.
@item FI: kahdeksasosanuotti
@end itemize
-
@seealso
@ref{note value}.
@item FI: kahdeksasosatauko
@end itemize
-
@seealso
@ref{note value}.
by a lyric tie, which looks like (and serves the same function) as a musical
tie.
-
@seealso
@ref{lyric tie}.
@node embellishment
@section embellishment
-
@seealso
@ref{ornament}.
The traditional process of music printing is done by cutting into a
plate of metal. Now also the term for the art of music typesetting.
-
@seealso
-None yet.
+No cross-references.
@node enharmonic
>>
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node equal temperament
A tuning system that divides the octave into 12 equal semitones (each of
which is precisely equal to 100 cents).
-
@seealso
@ref{cent}, @ref{semitone}, @ref{temperament}.
@end itemize
-
@seealso
@ref{allegro}, @ref{andante}, @ref{crescendo}, @ref{forte}.
@end itemize
-
@seealso
@ref{melisma}, @ref{sul G}, @ref{thorough bass}, @ref{octave mark},
@ref{octave marking}.
@item FI: F, f
@end itemize
-
@seealso
@ref{Pitch names}
>>
@end lilypond
-
@seealso
@ref{baritone clef}, @ref{strings}.
Indicator for an indeterminately falling pitch bend. Compare with
@emph{glissando}, which has determinate starting and ending pitches.
-
@seealso
@ref{doit}, @ref{glissando}.
direction of @q{feathering} -- but without changing the overall tempo
of the piece.
-
@seealso
Internals Reference: @ruser{Manual beams}
\bar "|."
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node fifth
S: kvint,
FI: kvintti.
-
@seealso
@ref{interval}.
>>
@end lilypond
-
@seealso
@ref{chord}, @ref{interval}.
Figures to the side or above the note that methodically indicate which
fingers to use while playing a passage.
-
@seealso
-None yet.
+No cross-references.
@node flag
g64-"64th" s8
@end lilypond
-
@seealso
@ref{note value}.
@end itemize
-
@seealso
@ref{articulation}, @ref{harmonics}.
S: beförtecken,
FI: alennusmerkki.
-
@seealso
@ref{accidental}.
@node forefall
@section forefall
-
@seealso
@ref{appoggiatura}.
S: forte,
FI: forte, voimakkaasti.
-[Italian: @q{loud}.]
+[Italian: @q{loud}]
Abbreviated @notation{@b{f}}. Variants include:
@item @emph{fortissimo}, very loud (notated @notation{@b{ff}}).
@end itemize
-
@seealso
-None yet.
+No cross-references.
@node fourth
S: kvart,
FI: kvartti.
-
@seealso
@ref{interval}.
If you are producing scores for eventual publication by a commercial publisher,
you may wish to procure a copy of their style manual.
-
@seealso
@ref{Frenched staff}.
A Frenched staff has unneeded measures or sections removed. This is useful
for producing, for example, an @emph{ossia} staff.
-
@seealso
@ref{ossia}.
S: fuga,
FI: fuuga.
-
@seealso
@ref{counterpoint}.
>>
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node G
@item FI: G, g
@end itemize
-
@seealso
@ref{Pitch names}
@lilypond[quote,notime]
\relative c'' {
\override Staff.Clef #'full-size-change = ##t
- \set Score.proportionalNotationDuration = #(ly:make-moment 1 8)
- \clef french
- g1
- \clef treble
- g1
- \clef "G^8"
- g1
- \clef "G_8"
- g1
+ \set Score.proportionalNotationDuration = #(ly:make-moment 1 8)
+ \clef french
+ g1
+ \clef treble
+ g1
+ \clef "G^8"
+ g1
+ \clef "G_8"
+ g1
}
\addlyrics {
- "french violin clef"
- "violin clef"
- "octave up"
- "octave down"
-}
+ "french violin clef"
+ "violin clef"
+ "octave up"
+ "octave down"
+ }
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node glissando
Letting the pitch slide fluently from one note to the other.
-
@seealso
-None yet.
+No cross-references.
@node grace notes
Notes printed in small types to indicate that their time values are not
counted in the rhythm of the bar.
-
@seealso
@ref{acciaccatura}, @ref{appoggiatura}, @ref{grace notes},
@ref{ornament}.
A combination of two staves with a brace. Usually used for piano music.
-
@seealso
@ref{brace}.
[Italian] Slow, solemn.
-
@seealso
-None yet.
+No cross-references.
@node gruppetto
@section gruppetto
-
@seealso
@ref{turn}.
usage. In the standard usage of these countries, @notation{B} means
@notation{B flat}.
-
@seealso
@ref{Pitch names}, @ref{B}.
c1\!
@end lilypond
-
@seealso
@ref{crescendo}, @ref{decrescendo}.
@item FI: puolinuotti.
@end itemize
-
@seealso
@ref{note value}.
@item FI: puolitauko.
@end itemize
-
@seealso
@ref{note value}.
natural harmonics, which are those played on the open string; and
artificial harmonics, which are produced on stopped strings.
-
@seealso
-None yet.
+No cross-references.
@node harmony
Consonances:
@lilypond[quote,notime,relative=2,line-width=13.0\cm]
-<g g>1_"unison " s
-<g b>_"third " s
-<g c>_"fourth " s
-<g d'>_"fifth " s
-<g e'>_"sixth " s
-<g g'>_"octave " s
-<g b'>_"tenth" s s
+ <g g>1_"unison " s
+ <g b>_"third " s
+ <g c>_"fourth " s
+ <g d'>_"fifth " s
+ <g e'>_"sixth " s
+ <g g'>_"octave " s
+ <g b'>_"tenth" s s
@end lilypond
Dissonances:
@lilypond[quote,notime,relative=2,line-width=13.0\cm]
-<g a>1_"second " s s
-<g f'>_"seventh " s s
-<g a'>_"ninth" s s
+ <g a>1_"second " s s
+ <g f'>_"seventh " s s
+ <g a'>_"ninth" s s
@end lilypond
For harmony that uses three or more notes, see @ref{chord}.
-
@seealso
@ref{chord}.
and is therefore a polymeter (second definition) of considerable antiquity.
-
@seealso
@ref{mensural notation}, @ref{meter}, @ref{polymeter}, @ref{proportion}.
Music in which one voice leads melodically supported by the other voices in
the same rhythm (more or less). In contrast to @emph{polyphony}.
-
@seealso
@ref{polyphony}.
+
@node hymn meter
@section hymn meter
@item 88.88.88.88 is Double Long Meter (DLM or D.L.M.)
@end itemize
-
@seealso
-None yet.
+No cross-references.
@node interval
S: intervall,
FI: intervalli, kahden sävelen korkeusero.
-Difference in pitch between two notes. Intervals may be perfect, minor,
-major, diminished, or augmented. The augmented fourth and the diminished
-fifth are identical (or @emph{enharmonic}) and are called @emph{tritonus}
-because they consist of three whole tones. The addition of such two
-intervals forms an octave.
-
-@lilypond[quote,notime,line-width=13.0\cm]
-<<
- \context Voice \relative c'' {
- < g g >1
- < g as >^"minor"
- < g a! >
- < g ais >^"augm"
- < gis bes >^"dimin"
- < g! bes >^"minor"
- < g b! >^"major"
- < g bis >^"augm"
- }
- \context Lyrics \lyrics {
- "unison " "second " "second " "second "
- "third " "third " "third " "third"
- }
->>
-@end lilypond
-
-@lilypond[quote,notime,line-width=13.0\cm]
-<<
- \context Staff \relative c'' {
- <g c >1^"perfect"
- <g cis>^"aug"
- <g d'>^"perfect"
- <g des'> ^"dim"
- <gis es'>^"dim"
- <g! es'>^"minor"
- <g e'!>^"major"
- <g eis'>^"aug"
- }
- \lyrics {
- "fourth " "fourth " "fifth " "fifth "
- "sixth " "sixth " "sixth " "sixth"
- }
->>
-@end lilypond
+Difference in pitch between two notes. Intervals may be diminished, minor,
+perfect, major, or augmented. The augmented fourth and the diminished fifth are
+identical (or @emph{enharmonic}) and are called @emph{tritonus} because they
+consist of three whole tones. The addition of such two intervals forms an
+octave.
@lilypond[quote,notime,line-width=13.0\cm]
<<
- \context Staff \relative c'' {
- <gis f'!>1^"dimin"
- <g! f'!>^"minor"
- <g fis'>^"major"
- <g g'>
- <g as'>^"minor"
- <g a'!>^"major"
- <g bes'>^"minor"
- <g b'!>^"major"
- }
- \context Lyrics \lyrics {
- "seventh " "seventh " "seventh " "octave "
- "ninth " "ninth " "tenth " "tenth"
- }
+\context Voice \relative c'' {
+% Prime or unison
+ < g g >1
+ < g gis >^"aug"
+% Second
+ < gis as >^"dim"
+ < g! as >^"min"
+ < g a! >^"maj"
+ < g ais >^"aug"
+% Third
+ < gis bes >^"dim"
+ < g! bes >^"min"
+ < g b! >^"maj"
+ < g bis >^"aug"
+% Fourth
+ < g ces >^"dim"
+ < g c! >^"per"
+ < g cis >^"aug"
+% Fifth
+ < g des' >^"dim"
+ < g d' >^"per"
+  < g dis' >^"aug"
+% Sixth
+ < gis es' >^"dim"
+ < g! es' >^"min"
+ < g e'! >^"maj"
+ < g eis' >^"aug"
+% Seventh
+ < gis f'! >^"dim"
+ < g! f'! >^"min"
+ < g fis' >^"maj"
+ < g fisis' >^"aug"
+% Octave
+ < g ges' >^"dim"
+ < g g' >^"per"
+ < g gis' >^"aug"
+}
+\context Lyrics \lyrics {
+ "unison " "unison "
+ "second " "second " "second " "second "
+ "third " "third " "third " "third "
+ "fourth " "fourth " "fourth "
+ "fifth " "fifth " "fifth "
+ "sixth " "sixth " "sixth " "sixth "
+ "seventh" "seventh" "seventh" "seventh"
+ "octave " "octave " "octave "
+}
>>
@end lilypond
-
@seealso
@ref{enharmonic}, @ref{whole tone}.
unstable chord position.
@end table
-
@seealso
-None yet.
+No cross-references.
@node inverted interval
<g, c>_"fourth " s s <g' c,>_"fifth " s s \bar "||"
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node just intonation
Tuning system in which the notes are obtained by adding and subtracting
natural fifths and thirds.
-
@seealso
@ref{temperament}.
According to the 12@w{ }tones of the @emph{chromatic scale} there are
12@w{ }keys, one on@w{ }c, one on c-sharp, etc.
-
@seealso
@ref{chromatic scale}, @ref{key signature}.
The sharps or flats appearing at the beginning of each staff indicating the
key of the music.
-
@seealso
@ref{accidental}.
S: ?,
FI: antaa väristä.
-[French: @q{Let vibrate}.] Most frequently associated with harp
+[French: @q{Let vibrate}] Most frequently associated with harp
parts. Marked @notation{l.v.} in the score.
-
@seealso
-None yet.
+No cross-references.
@node largo
called because of its strong tendency to @q{lead up} (resolve upwards)
to the tonic scale degree.
-
@seealso
-@ref{scale degree}, @ref{semitone}.
+@ref{scale degree}, @ref{semitone}.
@node ledger line
c''1
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node legato
>>
@end lilypond
-
@seealso
@ref{staccato}.
@node legato curve
@section legato curve
-
@seealso
@ref{slur}, @ref{legato}.
@node leger line
@section leger line
-
@seealso
@ref{ledger line}.
system of the white mensural notation, the need for ligatures to denote such
patterns disappeared.
-
@seealso
@ref{mensural notation}.
Also, the name of a music typesetting program.
-
@seealso
-None yet.
+No cross-references.
@node line
S: notlinje,
FI: viiva, nuottiviiva.
-
@seealso
@ref{staff}.
S: ?,
FI: kirjoitetussa äänenkorkeudessa.
-[Italian: @q{place}.] Instruction to play the following passage at the
+[Italian: @q{place}] Instruction to play the following passage at the
written pitch. Cancels octave mark (q.v.).
-
@seealso
@ref{octave mark}, @ref{octave marking}.
S: långt förslag,
FI: pitkä appoggiatura, pitkä etuhele.
-
@seealso
@ref{appoggiatura}.
@item FI: longa.
@end itemize
-Note value: double length of @notation{breve}.
+Note value: twice the length of a @notation{breve}.
@lilypond[quote,notime,relative=2]
\override NoteHead #'style = #'mensural
g\longa g\breve
@end lilypond
-
@seealso
@ref{breve}, @ref{note value}.
@c TODO: add languages
-
@seealso
@ref{elision}.
@c Definition?
-
@seealso
-None yet.
+No cross-references.
@node major
S: dur,
FI: duuri.
-
@seealso
@ref{diatonic scale}.
S: stort intervall,
FI: suuri intervalli.
-
@seealso
@ref{interval}.
+@node maxima
+@section maxima
+
+ES: ?,
+I: ?,
+F: ?,
+D: ?,
+NL: ?,
+DK: ?,
+S: ?,
+FI: ?.
+
+Note value: twice the length of a @notation{longa}.
+
+The maxima is the largest duration in use during the 15th and 16th centuries.
+Like the longa, the maxima can be either two or three times as long as the
+@notation{longa} (called @notation{binary} and @notation{ternary},
+respectively). By the late 15th century, most composers used the smaller
+proportion by default.
+
+@seealso
+@ref{Duration names notes and rests}, @ref{longa}, @ref{note value}.
+
+
@node meantone temperament
@section meantone temperament
temperament only a limited set of keys are playable. Used for tuning
keyboard instruments for performance of pre-1650 music.
-
@seealso
@ref{cent}, @ref{temperament}.
Such groups in numbers of two or more recur consistently throughout the
composition and are separated from each other by bar lines.
-
@seealso
@ref{bar line}, @ref{beat}, @ref{meter}.
@node measure repeat
@section measure repeat
-
@seealso
@ref{percent repeat}.
@end itemize
-
@seealso
@ref{chord}, @ref{functional harmony}, @ref{relative key}.
A melisma (Greek: plural @emph{melismata}) is a group of notes or tones sung
on one syllable, especially as applied to liturgical chant.
-
@seealso
-None yet.
+No cross-references.
@node melisma line
S: ?,
FI: melismaviiva.
-
@seealso
@ref{extender line}.
@node melodic cadence
@section melodic cadence
-
@seealso
@ref{cadenza}.
TODO: add to definition (including summary info on proportional notation)
-
@seealso
@ref{augmentation}, @ref{diminution}, @ref{ligature}, @ref{proportion}.
@c TODO: more cross-references?
+@node mensuration sign
+@section mensuration sign
+
+@c TODO: add languages
+
+ES: ?,
+I: ?,
+F: ?,
+D: ?,
+NL: ?,
+DK: ?,
+S: ?,
+FI: ?.
+
+The ancestor of the time signature, mensuration signs were used to indicate the
+relationship between two sets of note durations—specifically, the ratio of
+breves to semibreves (called @notation{tempus}), and of semibreves to minims
+(called @notation{prolatio}).
+
+Each ratio was represented with a single sign, and was either
+three-to-one (ternary) or two-to-one (binary), as in modern music notation.
+Unlike modern music notation, the @emph{ternary} ratio was the preferred
+one—applied to the @emph{tempus}, it was called @emph{perfect}, and was
+represented by a complete circle; applied to the @emph{prolatio}, it was called
+@emph{major} and was represented by a dot in the middle of the sign. The binary
+ratio applied to the @emph{tempus} was called @emph{imperfect}, and was
+represented by an incomplete circle; applied to @emph{prolatio}, it was called
+@emph{minor} and was represented by the lack of an internal dot. There are four
+possible combinations, which can be represented in modern time signatures with
+and without reduction of note values. (These signs are hard-coded in LilyPond
+with reduction.)
+
+@table @dfn
+@item perfect @emph{tempus} with major @emph{prolatio}
+Indicated by a complete circle with an internal dot. In modern time signatures,
+this equals:
+@itemize
+@item 9/4, with reduction or
+@item 9/2, without reduction
+@end itemize
+
+@item perfect @emph{tempus} and minor @emph{prolatio}
+Indicated by a complete circle without an internal dot. In modern time
+signatures, this equals:
+@itemize
+@item 3/2, with reduction or
+@item 3/1, without reduction
+@end itemize
+
+@item imperfect @emph{tempus} and major @emph{prolatio}
+Indicated by an incomplete circle with an internal dot. In modern time
+signatures, this equals:
+@itemize
+@item 6/4, with reduction or
+@item 6/2, without reduction
+@end itemize
+
+@item imperfect @emph{tempus} and minor @emph{prolatio}
+Indicated by an incomplete circle without an internal dot. In modern time
+signatures, this equals:
+@itemize
+@item 4/4, with reduction or
+@item 2/1, without reduction
+@end itemize
+@end table
+
+The last mensuration sign @emph{looks} like common-time because it @emph{is},
+with note values reduced from the original semibreve to a modern quarter note.
+Being doubly imperfect, this sign represented the (theoretically)
+least-preferred mensuration, but it was actually used fairly often.
+
+This system extended to the ratio of longer note values to each other:
+
+@itemize
+
+ @item maxima to longa, called:
+
+ @itemize
+
+ @item @notation{modus maximorum},
+ @item @notation{modus major}, or
+    @item @notation{maximodus}
+
+ @end itemize
+
+ @item longa to breve, called:
+
+ @itemize
+
+ @item @notation{modus longarum},
+ @item @notation{modus minor}, or
+ @item @notation{modus}
+
+ @end itemize
+
+@end itemize
+
+In the absence of any other indication, these modes were assumed to be
+binary. The mensuration signs only indicated tempus and prolatio, so
+composers needed another way to indicate these longer ratios (called modes).
+Around the middle of the 15th century, they started to use groups of rests at
+the beginning of the staff, preceding the mensuration sign.
+
+
+Two mensuration signs have survived to the present day: the C-shaped sign,
+which originally designated @notation{tempus imperfectum} and
+@notation{prolatio minor}, now stands for @notation{common time}; and the
+slashed C, which designated the same with @notation{diminution}, now stands
+for @notation{cut-time} (essentially, it has not lost its original meaning).
+
+@seealso
+@ref{diminution}, @ref{proportion}, @ref{time signature}.
+@c TODO: more cross-references?
+
+
@node meter
@section meter
the source, with sharps in the accompaniment where the voice has flats and
@emph{vice versa}.)
-
Compound duple meter (unknown):
@lilypond[quote,line-width=13.0\cm]
TODO: add information from discussion on lilypond-user related to polymeter.
-
@seealso
@ref{accent}, @ref{hemiola}, @ref{note value}, @ref{time signature}
followed acknowledged Winkler as the creator, but by then Mälzel had already
sold many of them, and people had taken to calling it a Mälzel Metronome.
-
@seealso
@ref{metronome mark}.
@notation{MM}, which is short for Mälzels Metronom (or Mälzel's Mark,
@emph{anglice}).
-
@seealso
@ref{metronome}
@node metronomic indication
@section metronomic indication
-
@seealso
@ref{metronome mark}
S: ?,
FI: kohtalaisen, melko.
-[Italian: @q{medium}.]
+[Italian: @q{medium}]
Used to qualify other indications, such as:
@end itemize
-
@seealso
-None yet.
+No cross-references.
@node mezzo-soprano
The female voice between soprano and contralto.
-
@seealso
@ref{soprano}, @ref{contralto}.
@lilypond[quote,notime,relative=1]
\override Staff.Clef #'full-size-change = ##t
-\clef bass c1 s
-\clef alto c s
-\clef treble c s
+ \clef bass c1 s
+ \clef alto c s
+ \clef treble c s
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node minor
S: moll,
FI: molli.
-
@seealso
@ref{diatonic scale}.
S: litet intervall,
FI: pieni intervalli.
-
@seealso
@ref{interval}.
@node mixolydian mode
@section mixolydian mode
-
@seealso
@ref{diatonic scale}.
S: modus, skala,
FI: moodi, kirkkosävelasteikko.
-
@seealso
@ref{church mode}, @ref{diatonic scale}.
of a @ref{sonata form} movement modulates to the dominant key if the
key is major and to the @ref{relative key} if the key is minor.
-
@seealso
-None yet.
+No cross-references.
@node mordent
S: mordent,
FI: mordent, korukuvio.
-
@seealso
@ref{ornament}.
@node motif
@section motif
-
@seealso
@ref{motive}.
}
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node movement
S: sats,
FI: osa.
-Greater musical works like @ref{symphony} and @ref{sonata} most often
-consist of several -- more or less -- independent pieces called
-movements.
-
+Greater musical works like @ref{symphony} and @ref{sonata} most often consist of
+several -- more or less -- independent pieces called movements.
@seealso
-None yet.
+No cross-references.
@node multi-measure rest
a1
@end lilypond
-
@seealso
@ref{longa}, @ref{breve}.
S: återställningstecken,
FI: palautusmerkki.
-
@seealso
@ref{accidental}.
@node neighbor tones
@section neighbor tones
-
@seealso
@ref{acciaccatura}, @ref{appoggiatura}, @ref{grace notes},
@ref{ornament}.
S: nona,
FI: nooni.
-
@seealso
@ref{interval}.
@node non-legato
@section non-legato
-
@seealso
@ref{legato}.
and @ref{note} is strongly recommended. Briefly, one sees a note,
and hears a tone.
-
@seealso
-None yet.
+No cross-references.
@node note head
For percussion instruments (often having no defined pitch) the note head may
indicate the instrument.
-
@seealso
@ref{clef}, @ref{flag}, @ref{staff}, @ref{stem}.
@node note names
@section note names
-
@seealso
@ref{Pitch names}
S: notvärde,
FI: nuotin aika-arvo.
-Note values (durations) are measured as fractions -- normally half -- of the
-next higher note value. The longest duration in current use is the
-@emph{breve}, but sometimes (especially music from the Baroque or earlier) the
-double-length note value @emph{longa} or the quadruple-length note value
-@emph{maxima} are used.
+Note values (durations) are measured as fractions—in modern usage, one-half—of
+the next higher note value. The longest duration in current use is the
+@notation{breve} (equal to two whole notes), but sometimes (especially in music
+dating from the Baroque era or earlier) the @notation{longa} (four whole notes)
+or @notation{maxima} (eight whole notes) may be found.
+
+As used in mensural notation, this fraction was more flexible: it could also
+be one-third the higher note value. Composers indicated which proportions
+to use with various signs—two of which survive to the present day: the
+C-shaped sign for @notation{common time}, and the slashed C for
+@notation{alla breve} or @notation{cut-time}.
@c TODO -- add maxima to this example, in a way that doesn't break it.
r16_"1/16" s16 r32_"1/32" s16 r64_"1/64" s32 }
@end lilypond
-An augmentation dot after a note multiplies the duration by one and a
-half. Another dot adds yet a fourth of the duration.
+An augmentation dot after a note increases its duration by half; a second dot
+increases it by half of the first addition (that is, by a fourth of the original
+duration). More dots can be used to add further halved fractions of the
+original note value (1/8, 1/16, etc.), but they are not frequently encountered.
@lilypond[quote,line-width=13.0\cm]
\relative c'' {
}
@end lilypond
-
@seealso
-None yet.
+@ref{common time}.
@node octavation
@section octavation
-
@seealso
@ref{octave marking}.
For uses like @notation{all'ottava} or @notation{8va} with an extender line or
bracket, or @notation{loco} see octave marking.
-
@seealso
@ref{interval}, @ref{octave marking}.
In the phrases above, @notation{quindicesima} is sometimes replaced with
@notation{quindecima}, which is Latin.
-Finally, the music on an entire staff can be marked to be played in a
-different octave by putting a small 8 or 15 above or below the clef at the
-beginning. This octave mark can be applied to any clef, but it is most
-frequently used with the G and F clefs.
-
+The music on an entire staff can be marked to be played in a different octave by
+putting a small 8 or 15 above or below the clef at the beginning. This octave
+mark can be applied to any clef, but it is most frequently used with the G and F
+clefs.
@seealso
@ref{F clef}, @ref{G clef}, @ref{loco}, @ref{octave marking}.
For a list of the specific marks used, see @ref{octave mark}.
-
@seealso
@ref{interval}, @ref{loco}, @ref{octave}, @ref{octave mark}.
@node octave sign
@section octave sign
-
@seealso
@ref{octave mark}.
>>
@end lilypond
-
@seealso
@ref{acciaccatura}, @ref{appoggiatura}, @ref{grace notes}.
score, usually only a few measures long, which presents another version
of the music, for example for small hands.
-
@seealso
-None yet.
+No cross-references.
@node part
@end itemize
-
@seealso
@ref{counterpoint}
@node pause
@section pause
-
@seealso
@ref{fermata}.
@node pennant
@section pennant
-
@seealso
@ref{flag}.
@lilypond[quote,relative=2,line-width=13.0\cm]
\time 4/4
-\repeat percent 4 { c4_"Beat (or slash) repeat" }
-\repeat percent 2 { c4 e g b_"Measure repeat" }
-\repeat percent 2 { c,2 es | f4 fis g c_"Multi-measure repeat" | }
+ \repeat percent 4 { c4_"Beat (or slash) repeat" }
+ \repeat percent 2 { c4 e g b_"Measure repeat" }
+ \repeat percent 2 { c,2 es | f4 fis g c_"Multi-measure repeat" | }
@end lilypond
-
@seealso
@ref{repeat},
@uref{http://www.music.vt.edu/musicdictionary/textr/Repeat.html,University of
tambourine, cymbals, chinese gong (tam-tam), triangle, celesta, glockenspiel,
and xylophone.
-
@seealso
-None yet.
+No cross-references.
@node perfect interval
S: rent intervall,
FI: puhdas intervalli.
-
@seealso
@ref{interval}.
A natural division of the melodic line, comparable to a sentence of speech.
-
@seealso
@ref{caesura}.
The clear rendering in musical performance of the @notation{phrases} of the
melody. Phrasing may be indicated by a @notation{slur}.
-
@seealso
@ref{phrase}, @ref{slur}.
@emph{piano} (@b{p}) soft, @emph{pianissimo} (@b{pp}) very soft,
@emph{mezzo piano} (@b{mp}) medium soft.
-
@seealso
-None yet.
+No cross-references.
@node pickup
S: upptakt,
FI: kohotahti.
-
@seealso
@ref{anacrusis}.
@end enumerate
-
@seealso
@ref{Pitch names}.
A technique for stringed instruments, abbr. @emph{pizz}. To play by plucking
the strings.
-
@seealso
-None yet.
+No cross-references.
@node polymeter
@end itemize
-
@seealso
@ref{polymetric} (adj.)
Characterized by @emph{polymeter}: using two or more metric frameworks
simultaneously or in alternation.
-
@seealso
@ref{polymeter} (noun)
A time signature that indicates regularly alternating polymetric time.
-
@seealso
@ref{polymetric}.
Music written in a combination of several simultaneous voices (parts)
of a more or less pronounced individuality.
-
@seealso
@ref{counterpoint}.
without changing the bow's direction. It is used for passages of a
@notation{cantabile} character.
-
@seealso
@ref{legato}.
Very quick, i.e., quicker than @ref{allegro}; @emph{prestissimo}
denotes the highest possible degree of speed.
-
@seealso
-None yet.
+No cross-references.
@node proportion
S: ?,
FI: suhde.
-[Latin: @emph{proportio}.] Described in great detail by Gaffurius, in
+[Latin: @emph{proportio}] Described in great detail by Gaffurius, in
@emph{Practica musicae} (published in Milan in 1496). In mensural notation,
proportion is:
@itemize
@item 2:1 (or simply 2), expressed by a vertical line through the
-mensuration sign (the origin of the @q{cut-time} time signature), or by
-turning the sign backwards
+mensuration sign (the origin of the @notation{alla breve} time signature),
+or by turning the sign backwards
@item 3:1 (or simply 3)
@item 3:2 (@emph{sesquialtera})
@end itemize
@c TODO: add an example or two. O => 4/3, and its modern equivalent
-
@seealso
@ref{mensural notation}.
than the C obtained by adding 7 octaves. The difference between those two
pitches is the Pythagorean comma.
-
@seealso
@ref{cent}, @ref{temperament}.
S: kvartol,
FI: kvartoli.
-
@seealso
@ref{note value}.
and D^m7 are all identical). The last three chords are not commonly used
except in jazz.
-
@seealso
@ref{chord}.
@item FI: neljäsosanuotti
@end itemize
-
@seealso
@ref{note value}.
@item FI: neljäsosatauko
@end itemize
-
@seealso
@ref{note value}.
An interval equal to half a semitone.
-
@seealso
@ref{interval}
S: kvintol,
FI: kvintoli.
-
@seealso
@ref{note value}.
[Italian] A performance indication, abbreviated "rall.".
-
@seealso
@ref{ritardando}.
\bar "||"
@end lilypond
-
@seealso
@ref{key}, @ref{key signature}, @ref{major}, @ref{minor}.
}
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node rest
@c F: 'pause' if you mean a whole rest, 'silence' if you do not want to
@c specify the rest's value.
-
@seealso
@ref{note value}.
@end itemize
-
@seealso
-None yet.
+No cross-references.
@node ritardando
Gradually slackening in speed. Mostly abbreviated to rit.@: or ritard.
-
@seealso
-None yet.
+No cross-references.
@node ritenuto
Immediate reduction of speed.
-
@seealso
-None yet.
+No cross-references.
@node scale
S: skala,
FI: asteikko, sävelasteikko.
-
@seealso
@ref{diatonic scale}.
>>
@end lilypond
-
@seealso
@ref{functional harmony}.
S: ?,
FI: epätavallinen viritys.
-[Italian: @emph{scordare}, @q{to mistune}.] Unconventional
+[Italian: @emph{scordare}, @q{to mistune}] Unconventional
tuning of stringed instruments, particularly lutes or violins. Used
to:
Tunings that could be called @var{scordatura} first appeared early in
the 16th Century and became commonplace in the 17th.
-
@seealso
-None yet.
+No cross-references.
@node score
instrument is to play, each voice to sing, having each part arranged
one underneath the other on different staves @ref{staff}.
-
@seealso
-None yet.
+No cross-references.
@node second
S: sekund,
FI: sekunti.
-The @ref{interval} between two neighboring tones of a scale. A
-@ref{diatonic scale} consists of alternating @ref{semitone}s and
-@ref{whole tone}s, hence the size of a second depends on the scale
-degrees in question.
+The interval between two neighboring tones of a scale. A diatonic scale
+consists of alternating semitones and whole tones, hence the size of a
+second depends on the scale degrees in question.
+
+@seealso
+@ref{diatonic scale}, @ref{interval}, @ref{semitone}, @ref{whole tone}.
+
+
+@node semibreve
+@section semibreve
+
+@itemize
+@item US: whole note,
+@item ES: redonda,
+@item I: semibreve,
+@item F: ronde,
+@item D: Ganze, ganze Note,
+@item NL: hele noot,
+@item DK: helnode,
+@item S: helnot,
+@item FI: kokonuotti.
+@end itemize
+
+Note value: called @notation{whole note} in the US.
+The semibreve is the basis for the @notation{tactus} in mensural notation
+(i.e. music written before ca. 1600).
@seealso
-None yet.
+@ref{mensural notation}, @ref{note value}.
@node semitone
FI: puolisävel.
The interval of a minor second. The (usually) smallest interval in European
-composed music. The interval between two neighbouring tones on the piano
+composed music. The interval between two neighboring tones on the piano
keyboard -- including black and white keys -- is a semitone. An octave may
be divided into 12@w{ }semitones.
g1 gis s a bes s b! c
@end lilypond
-
@seealso
@ref{interval}, @ref{chromatic scale}.
S: septim,
FI: septimi.
-
@seealso
@ref{interval}.
@node sextolet
@section sextolet
-
@seealso
@ref{sextuplet}, @ref{note value}.
S: sextol,
FI: sekstoli.
-
@seealso
@ref{note value}.
@node shake
@section shake
-
@seealso
@ref{trill}.
S: kors@-förtecken,
FI: korotusmerkki.
-
@seealso
@ref{accidental}.
S: ?,
FI: samoin.
-[Italian: @q{in the same manner}.] Performance direction: the music thus marked
+[Italian: @q{in the same manner}] Performance direction: the music thus marked
is to be played in the same manner (i.e. with the same articulations, dynamics,
etc.) as the music that precedes it.
-
@seealso
TODO: Where else could I refer the reader?
A meter in which the basic beat is subdivided in two: that is, a meter
that does not include triplet subdivision of the beat.
-
@seealso
@ref{compound meter}, @ref{meter}.
@item FI: kuudestoistaosanuotti
@end itemize
-
@seealso
@ref{note value}.
@item FI: kuudestoistaosatauko
@end itemize
-
@seealso
@ref{note value}.
S: sext,
FI: seksti.
-
@seealso
@ref{interval}.
@item FI: kuudeskymmenesneljäsosanuotti
@end itemize
-
@seealso
@ref{note value}.
@item FI: kuudeskymmenesneljäsosatauko
@end itemize
-
@seealso
@ref{note value}.
@node slash repeat
@section slash repeat
-
@seealso
@ref{percent repeat}.
played @ref{legato}, e.g., with one stroke of the violin bow or with
one breath in singing.
-
@seealso
-None yet.
+No cross-references.
@node solmization
@emph{re}, @emph{mi}, @emph{fa}, @emph{sol}, @emph{la}, @emph{si}
(@emph{ti})).
-
@seealso
@ref{scale}, @ref{scale degree}.
accompaniment, which consists of three or four independent pieces,
called movements.
-
@seealso
-None yet.
+No cross-references.
@node sonata form
@notation{dominant} if the @notation{tonic} is @notation{major}, and in the
@notation{relative key} if the tonic is @notation{minor}.
-
@seealso
@ref{dominant}, @ref{major}, @ref{minor}, @ref{relative key}, @ref{sonata},
@ref{symphony}, @ref{tonic}.
@node song texts
@section song texts
-
@seealso
@ref{lyrics}.
The highest female voice.
-
@seealso
-None yet.
+No cross-references.
@node staccato
@lilypond[quote,relative=2]
\key d \major
\time 4/4
-\partial 8 a8 |
-d4-\staccato cis-\staccato b-\staccato cis-\staccato |
-d2.
-\bar "||"
+ \partial 8 a8 |
+ d4-\staccato cis-\staccato b-\staccato cis-\staccato |
+ d2.
+ \bar "||"
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node staff
indicating (in connection with a @ref{clef}) their pitch. Staves for
@ref{percussion} instruments may have fewer lines.
-
@seealso
-None yet.
+No cross-references.
@node staves
@section staves
-
@seealso
@ref{staff}.
@lilypond[quote,notime,relative=2]
\set Score.autoBeaming = ##f
-g2_"1/2" g' s16
-g,4_"1/4" g' s16
-g,8_"1/8" g' s16
-g,16_"1/16" g' s16
+ g2_"1/2" g' s16
+ g,4_"1/4" g' s16
+ g,8_"1/8" g' s16
+ g,16_"1/16" g' s16
@end lilypond
-
@seealso
@ref{beam}.
S: ?,
FI: kiihdyttäen, nopeuttaen.
-[Italian: @q{pressing}.] Pressing, urging, or hastening the time, as to a
+[Italian: @q{pressing}] Pressing, urging, or hastening the time, as to a
climax.
-
@seealso
@ref{accelerando}.
commonly used in a symphony orchestra are violin, viola, violoncello,
and double bass.
-
@seealso
-None yet.
+No cross-references.
@node strong beat
S: betonat taktslag,
FI: tahdin vahva isku.
-
@seealso
@ref{beat}, @ref{accent}, @ref{measure}, @ref{rhythm}.
The fourth @notation{scale degree}.
-
@seealso
@ref{functional harmony}, @ref{scale degree}.
The sixth @notation{scale degree}.
-
@seealso
@ref{functional harmony}, @ref{scale degree}, @ref{superdominant}.
The seventh @ref{scale degree}.
-
@seealso
@ref{functional harmony}, @ref{scale degree}.
Indicates that the indicated passage (or note) should be played on the
G string.
-
@seealso
-None yet.
+No cross-references.
@node superdominant
The sixth @ref{scale degree}.
-
@seealso
@ref{functional harmony}, @ref{scale degree}, @ref{submediant}.
The second @ref{scale degree}.
-
@seealso
@ref{functional harmony}, @ref{scale degree}.
A symphony may be defined as a @emph{sonata} for orchestra.
-
@seealso
@ref{sonata}.
c4
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node syntonic comma
This comma is also known as the comma of Didymus, or didymic comma.
-
@seealso
@ref{Pythagorean comma}
The collection of staves (@notation{staff}), two or more, as used for writing
down of keyboard, chamber, choral, or orchestral music.
-
@seealso
@ref{staff}.
Systems of tuning in which the intervals deviate from the acoustically
pure intervals.
-
@seealso
@ref{meantone temperament}, @ref{equal temperament}.
@notation{adagio}, @notation{andante}, @notation{allegro}, and
@notation{presto}.
-
@seealso
@ref{adagio}, @ref{allegro}, @ref{andante}, @ref{largo}, @ref{presto}.
The highest @q{natural} male voice (apart from @notation{countertenor}).
-
@seealso
@ref{countertenor}.
S: decima,
FI: desimi.
-
@seealso
@ref{note value}.
An indication that a particular note should be held for the whole
length, although this can vary depending on the composer and era.
-
@seealso
-None yet.
+No cross-references.
@node third
S: ters,
FI: terssi.
-
@seealso
@ref{interval}.
@item FI: kolmaskymmeneskahdesosanuotti
@end itemize
-
@seealso
@ref{note value}.
@item FI: kolmaskymmeneskahdesosatauko
@end itemize
-
@seealso
@ref{note value}.
@node thorough bass
@section thorough bass
-
@seealso
@ref{figured bass}.
@node time
@section time
-
@seealso
@ref{meter}.
meter. It most often takes the form of a fraction, but a few signs
derived from mensural notation and proportions are also employed.
-
@seealso
-@ref{mensural notation}, @ref{meter}.
+@ref{mensural notation}, @ref{mensuration sign}, @ref{meter}.
@node tone
@c Music from the 20th century may be based on atonal sounds. Meh, not so much
-
@seealso
-None yet.
+No cross-references.
@node tonic
The first @notation{scale degree}.
-
@seealso
@ref{functional harmony}, @ref{scale degree}.
@end itemize
-
@seealso
@ref{concert pitch}.
}
@end lilypond
-
@seealso
-None yet.
+No cross-references.
@node treble clef
S: diskantklav,
FI: diskanttiavain.
-
@seealso
@ref{G clef}.
@end enumerate
@lilypond[quote,notime,relative=1]
-e2:32_"a"
-f:32 [ e8:16 f:16 g:16 a:16 ] s4
-\repeat tremolo 8 { e32_"b" g }
+ e2:32_"a"
+ f:32 [ e8:16 f:16 g:16 a:16 ] s4
+ \repeat tremolo 8 { e32_"b" g }
@end lilypond
-
@seealso
@ref{strings}
S: treklang,
FI: kolmisointu.
-
@seealso
@ref{chord}.
S: drill,
FI: trilli.
-
@seealso
@ref{ornament}.
S: tretakt,
FI: kolmijakoinen.
-
@seealso
@ref{meter}.
S: triol,
FI: trioli.
-
@seealso
@ref{note value}.
S: tritonus,
FI: tritonus.
-
@seealso
@ref{interval}.
@emph{A} above middle C (440 cps/Hz), which is the international tuning
standard. Tuning forks for other pitches are available.
-
@seealso
@ref{middle C}.
indicated with a bracket and a number indicating the number of
subdivisions.
-
@seealso
@ref{triplet}, @ref{note value}.
S: dubbelslag,
FI: korukuvio.
-
@seealso
@ref{ornament}.
(voices) or by the whole orchestra (choir), either at exactly the same
pitch or in a different octave.
-
@seealso
-None yet.
+No cross-references.
@node upbeat
S: upptakt,
FI: kohotahti.
-
@seealso
@ref{anacrusis}
@end itemize
-
@seealso
-None yet.
+No cross-references.
@node volta
S: ?,
FI: yksi kertauksen maaleista.
-[Italian: @q{time} (instance, not duration).] An ending, such as a first
+[Italian: @q{time} (instance, not duration)] An ending, such as a first
or second ending. LilyPond extends this idea to any number, and allows any text
(not just a number) -- to serve as the @notation{volta} text.
-
@seealso
-None yet.
+No cross-references.
@node weak beat
S: obetonat taktslag,
FI: tahdin heikko isku.
-
@seealso
@ref{beat}, @ref{measure}, @ref{rhythm}.
@item FI: kokonuotti
@end itemize
-
@seealso
@ref{note value}.
@item FI: kokotauko
@end itemize
-
@seealso
@ref{note value}.
on the piano keyboard with exactly one key between them -- including
black and white keys -- is a whole tone.
-
@seealso
@ref{interval}.
commonly used in a symphony orchestra are flute, oboe, clarinet,
saxophone, and bassoon.
-
@seealso
-None yet.
+No cross-references.
@node Duration names notes and rests
@end multitable
-
@seealso
@ref{mensural notation}
@cindex durations, of notes
@cindex note durations
+@cindex length of notes
+@cindex note lengths
@funindex \longa
+@funindex longa
@funindex \breve
+@funindex breve
@funindex \maxima
+@funindex maxima
-Durations are designated by numbers and dots.
-Durations are entered as their reciprocal values. For example, a
-quarter note is entered using a @code{4} (since it is a 1/4 note),
-and a half note is entered using a @code{2} (since it is a 1/2
-note). For notes longer than a whole you must use the
-@code{\longa} (a double breve) and @code{\breve} commands.
-Durations as short as 128th notes may be specified. Shorter values
-are possible, but only as beamed notes.
+Durations are designated by numbers and dots. Durations are entered
+as their reciprocal values. For example, a quarter note is entered
+using a @code{4} (since it is a 1/4 note), and a half note is entered
+using a @code{2} (since it is a 1/2 note). For notes longer than a
+whole you must use the @code{\longa} (a double breve) and
+@code{\breve} commands. Durations as short as 128th notes may be
+specified. Shorter values are possible, but only as beamed notes.
@c Two 64th notes are needed to obtain beams
@lilypond[quote,verbatim,relative=2]
@cindex notes, dotted
@cindex dotted notes
+@cindex notes, double-dotted
+@cindex double-dotted notes
@funindex .
proportional notation, see @ref{Proportional notation}.
@funindex \dotsUp
+@funindex dotsUp
@funindex \dotsDown
+@funindex dotsDown
@funindex \dotsNeutral
+@funindex dotsNeutral
Dots are normally moved up to avoid staff lines, except in
polyphonic situations. Predefined commands are available to
Music Glossary:
@rglos{breve},
@rglos{longa},
+@rglos{maxima},
@rglos{note value},
@rglos{Duration names notes and rests}.
@cindex tuplets
@cindex triplets
+
@funindex \times
+@funindex times
Tuplets are made from a music expression by multiplying all the
durations with a fraction:
@end lilypond
@funindex \tupletUp
+@funindex tupletUp
@funindex \tupletDown
+@funindex tupletDown
@funindex \tupletNeutral
+@funindex tupletNeutral
The automatic placement of the tuplet bracket above or below the
notes may be overridden manually with predefined commands, for
@cindex durations, scaling
You can alter the duration of single notes, rests or chords by a
-fraction @code{N/M} by appending @code{*N/M} (or @code{*N} if
-@code{M} is 1) to the duration.
-This will not affect the appearance of the notes or rests
-produced, but the altered duration will be used in calculating the
-position within the measure and setting the duration in the MIDI
-output. Multiplying factors may be combined such as @code{*L*M/N}.
+fraction @code{N/M} by appending @code{*N/M} (or @code{*N} if @code{M}
+is 1) to the duration. This will not affect the appearance of the
+notes or rests produced, but the altered duration will be used in
+calculating the position within the measure and setting the duration
+in the MIDI output. Multiplying factors may be combined such as
+@code{*L*M/N}.
In the following example, the first three notes take up exactly
two beats, but no triplet bracket is printed.
@code{s1*23}.
@cindex compressing music
+
@funindex \scaleDurations
+@funindex scaleDurations
Longer stretches of music may be compressed by a fraction in the
same way, as if every note, chord or rest had the fraction as a
@unnumberedsubsubsec Ties
@cindex tie
+
@funindex ~
A tie connects two adjacent note heads of the same pitch. The tie
<c~ e g~ b> <c e g b>
@end lilypond
-@funindex \repeatTie
@cindex repeating ties
@cindex ties, repeating
@cindex volta brackets and ties
@cindex ties and volta brackets
+@funindex \repeatTie
+@funindex repeatTie
+
When a second alternative of a repeat starts with a tied note, you
have to specify the repeated tie as follows:
@cindex laissez vibrer
@cindex ties, laissez vibrer
+
@funindex \laissezVibrer
+@funindex laissezVibrer
@notation{L.v.} ties (@notation{laissez vibrer}) indicate that
notes must not be damped at the end. It is used in notation for
@end lilypond
@cindex ties, placement
+
@funindex \tieUp
+@funindex tieUp
@funindex \tieDown
+@funindex tieDown
@funindex \tieNeutral
+@funindex tieNeutral
The vertical placement of ties may be controlled, see
Predefined commands, or for details, see
@ref{Direction and placement}.
@cindex ties, appearance
+@cindex ties, dotted
+@cindex ties, dashed
+@cindex dashed ties
+@cindex dotted ties
+
@funindex \tieDotted
+@funindex tieDotted
@funindex \tieDashed
+@funindex tieDashed
@funindex \tieSolid
+@funindex tieSolid
Solid, dotted or dashed ties may be specified, see Predefined
commands.
-
-
@node Writing rests
@subsection Writing rests
@unnumberedsubsubsec Rests
@cindex rest
@cindex rest, entering durations
-@cindex maxima
-@cindex longa
-@cindex breve
+@cindex maxima rest
+@cindex longa rest
+@cindex breve rest
@funindex \rest
+@funindex rest
@funindex r
@funindex \maxima
+@funindex maxima
@funindex \longa
+@funindex longa
@funindex \breve
+@funindex breve
Rests are entered like notes with the note name @code{r}.
Durations longer than a whole rest use the predefined
@seealso
+Music Glossary:
+@rglos{breve},
+@rglos{longa},
+@rglos{maxima}.
+
Notation Reference:
@ref{Full measure rests}.
@cindex invisible rest
@cindex rest, invisible
@cindex spacer note
+@cindex spacer rest
+
@funindex s
@funindex \skip
+@funindex skip
An invisible rest (also called a @q{spacer rest}) can be entered
like a note with the note name@tie{}@code{s}:
@cindex rest, multi-measure
@cindex rest, full-measure
@cindex whole rest for a full measure
+@cindex rest, whole for a full measure
+
@funindex R
Rests for one or more full measures are entered like notes with
b2^"Tutti" b4 a4
@end lilypond
-The duration of full-measure rests is identical to the duration notation
-used for notes. The duration in a multi-measure rest must always be an
-integral number of measure-lengths, so augmentation
-dots or fractions must often be used:
+The duration of full-measure rests is identical to the duration
+notation used for notes. The duration in a multi-measure rest must
+always be an integral number of measure-lengths, so augmentation dots
+or fractions must often be used:
@lilypond[quote,fragment,verbatim]
\compressFullBarRests
R4*5*4 |
@end lilypond
-A full-measure rest is printed as either a whole
-or breve rest, centered in the measure, depending on the time
-signature.
+A full-measure rest is printed as either a whole or breve rest,
+centered in the measure, depending on the time signature.
@lilypond[quote,verbatim,fragment]
\time 4/4
\time 8/4
R1*2 |
@end lilypond
-@funindex \expandFullBarRests
-@funindex \compressFullBarRests
+
@cindex multi-measure rest, expanding
@cindex multi-measure rest, contracting
-By default a multi-measure rest is expanded in the printed score
-to show all the rest measures explicitly.
-Alternatively, a mult-measure rest can be shown as a single measure
-containing a multi-measure rest symbol, with the number of measures of rest
-printed above the measure:
+@funindex \expandFullBarRests
+@funindex expandFullBarRests
+@funindex \compressFullBarRests
+@funindex compressFullBarRests
+
+By default a multi-measure rest is expanded in the printed score to
+show all the rest measures explicitly. Alternatively, a multi-measure
+rest can be shown as a single measure containing a multi-measure rest
+symbol, with the number of measures of rest printed above the measure:
@lilypond[quote,fragment,verbatim]
% Default behavior
@cindex fermata on multi-measure rest
@cindex multi-measure rest, attaching fermata
+@funindex \fermataMarkup
+@funindex fermataMarkup
+
Markups can be added to multi-measure rests.
The predefined command @code{\fermataMarkup}
is provided for adding fermatas.
@rinternals{MultiMeasureRestText}.
+@cindex fingerings and multi-measure rests
+@cindex multi-measure rests and fingerings
+
@knownissues
If an attempt is made to use fingerings (e.g.,
@cindex time signature
@cindex meter
+
@funindex \time
+@funindex time
The time signature is set as follows:
\time 3/4 c2.
@end lilypond
-@cindex Time signature, visibility of
+@cindex time signature, visibility of
Time signatures are printed at the beginning of a piece
and whenever the time signature changes. If a change takes place
c c c c
@end lilypond
+@cindex time signature style
+@cindex meter style
+
@funindex \numericTimeSignature
+@funindex numericTimeSignature
@funindex \defaultTimeSignature
-@cindex time signature style
+@funindex defaultTimeSignature
The time signature symbol that is used in 2/2 and 4/4 time can be
changed to a numeric style:
@cindex pickup measure
@cindex measure, change length
@cindex measurePosition
+
@funindex \partial
+@funindex partial
Partial or pick-up measures, such as an anacrusis or upbeat, are
entered using the @code{\partial} command, with the syntax
@node Unmetered music
@unnumberedsubsubsec Unmetered music
-@funindex \cadenzaOn
-@funindex \cadenzaOff
@cindex bar lines, turning off
@cindex bar numbering, turning off
@cindex cadenza
@cindex unmetered music
+@funindex \cadenzaOn
+@funindex cadenzaOn
+@funindex \cadenzaOff
+@funindex cadenzaOff
+
Bar lines and bar numbers are calculated automatically. For
unmetered music (some cadenzas, for example), this is not desirable.
To turn off automatic calculation of bar lines and bar numbers,
@cindex double time signatures
@cindex signatures, polymetric
+@cindex time signatures, polymetric
+@cindex time signatures, double
@cindex polymetric signatures
@cindex meter, polymetric
+@funindex timeSignatureFraction
+@funindex \scaleDurations
+@funindex scaleDurations
+@funindex \times
+@funindex times
+
Polymetric notation is supported, either explicitly or by modifying
the visible time signature symbol and scaling the note durations.
@code{\times}, but does not create a tuplet bracket; see
@ref{Scaling durations}.
+@cindex beaming in polymetric music
+@cindex beaming in polymetric meter
+
In this example, music with the time signatures of 3/4, 9/8, and
10/8 are used in parallel. In the second staff, shown durations
are multiplied by 2/3, as 2/3 * 9/8 = 3/4, and in the third
@cindex notes, splitting
@cindex splitting notes
+@funindex Note_heads_engraver
+@funindex Completion_heads_engraver
+
Long notes which overrun bar lines can be converted automatically
to tied notes. This is done by replacing the
@code{Note_heads_engraver} with the
@node Showing melody rhythms
@unnumberedsubsubsec Showing melody rhythms
+@cindex melody rhythms, showing
+@cindex rhythms, showing melody
+
Sometimes you might want to show only the rhythm of a melody. This
can be done with the rhythmic staff. All pitches of notes on such a
staff are squashed, and the staff itself has a single line
>>
@end lilypond
+@cindex guitar chord charts
+@cindex strumming rhythms, showing
+@cindex guitar strumming rhythms, showing
+
+@funindex Pitch_squash_engraver
+@funindex \improvisationOn
+@funindex improvisationOn
+@funindex \improvisationOff
+@funindex improvisationOff
+
Guitar chord charts often show the strumming rhythms. This can
be done with the @code{Pitch_squash_engraver} and
@code{\improvisationOn}.
@cindex beams, manual
@cindex manual beams
+@cindex beams, setting rules for
+@cindex beams, custom rules for
+
+@funindex measureLength
+@funindex beatLength
+@funindex beatGrouping
+@funindex \autoBeamOn
+@funindex autoBeamOn
+@funindex \autoBeamOff
+@funindex autoBeamOff
@lilypond[quote,verbatim,relative=2]
\time 2/4 c8 c c c
@code{beatGrouping}. Both the beaming rules and the context
properties can be overridden, see @ref{Setting automatic beam behavior}.
-@cindex autoBeamOn
-@cindex autoBeamOff
@warning{If beams are used to indicate melismata in songs, then automatic
beaming should be switched off with @code{\autoBeamOff} and the beams
@snippets
+@cindex line breaks and beams
+@cindex beams and line breaks
+
@funindex breakable
-@cindex break, line
-@cindex line breaks
@lilypondfile[verbatim,lilyquote,ragged-right,texidoc,doctitle]
{beams-across-line-breaks.ly}
@node Setting automatic beam behavior
@unnumberedsubsubsec Setting automatic beam behavior
-@funindex autoBeaming
-@funindex autoBeamSettings
-@funindex (end * * * *)
-@funindex (begin * * * *)
@cindex automatic beams, tuning
@cindex tuning automatic beaming
@cindex autobeam
@cindex lyrics and beaming
+@funindex autoBeaming
+@funindex autoBeamSettings
+@funindex (end * * * *)
+@funindex (begin * * * *)
+@funindex measureLength
+@funindex beatLength
+@funindex beatGrouping
+@funindex \time
+@funindex time
+@funindex \set
+@funindex set
The placement of automatic beams is determined by the rules
described in @ref{Automatic beams}. There are two mutually
@lilypondfile[verbatim,lilyquote,ragged-right,texidoc,doctitle]
{beam-endings-in-score-context.ly}
+@funindex \autoBeamOff
+@funindex autoBeamOff
+@funindex \autoBeamOn
+@funindex autoBeamOn
@predefined
-@funindex \autoBeamOff
@code{\autoBeamOff},
-@funindex \autoBeamOn
@code{\autoBeamOn}.
@endpredefined
+@cindex beam, last in score
+@cindex beam, last in polyphonic voice
+
@knownissues
If a score ends while an automatic beam has not been ended and is
@unnumberedsubsubsec Manual beams
@cindex beams, manual
+@cindex manual beams
+
@funindex ]
@funindex [
@end lilypond
+@funindex \noBeam
+@funindex noBeam
+
Individual notes may be marked with @code{\noBeam} to prevent them
from being beamed:
@unnumberedsubsubsec Feathered beams
@cindex beams, feathered
+
@funindex \featherDurations
+@funindex featherDurations
+@funindex grow-direction
Feathered beams are used to indicate that a small group of notes
should be played at an increasing (or decreasing) tempo, without
@code{featheredDurations} command which specifies the ratio
between the durations of the first and last notes in the group.
-The square brackets
-show the extent of the beam and the braces show
-which notes are to have their durations modified. Normally
-these would delimit the same group of notes, but this is not
-required: the two commands are independent.
+The square brackets show the extent of the beam and the braces show
+which notes are to have their durations modified. Normally these
+would delimit the same group of notes, but this is not required: the
+two commands are independent.
In the following example the eight 16th notes occupy exactly the
same time as a half note, but the first note is one half as long
@unnumberedsubsubsec Bar lines
@cindex bar lines
-@funindex \bar
@cindex measure lines
@cindex repeat bars
+@funindex \bar
+@funindex bar
+
Bar lines delimit measures, and are also used to indicate
repeats. Normally, simple bar lines are automatically inserted
into the printed output at places based on the current time
counter is not increased. To force a line break see
@ref{Line breaking}.
+@cindex manual bar lines
+@cindex manual measure lines
+@cindex bar lines, manual
+@cindex measure lines, manual
+
This and other special bar lines may be inserted manually at any
-point. When they coincide with the end of a measure they replace
-the simple bar line which would have been inserted there
-automatically. When they do not coincide
-with the end of a measure the specified bar line is inserted at that
-point in the printed output. Such insertions do not affect
-the calculation and placement of subsequent automatic bar lines.
+point. When they coincide with the end of a measure they replace the
+simple bar line which would have been inserted there automatically.
+When they do not coincide with the end of a measure the specified bar
+line is inserted at that point in the printed output. Such insertions
+do not affect the calculation and placement of subsequent automatic
+bar lines.
The simple bar line and five types of double bar line are available
for manual insertion:
@funindex whichBar
@funindex defaultBarType
+@funindex \bar
+@funindex bar
+@funindex bartype
The command @code{\bar }@var{bartype} is a shortcut for
@code{\set Timing.whichBar = }@var{bartype}. A bar line is
@cindex bar numbers
@cindex measure numbers
+@cindex numbers, bar
+@cindex numbers, measure
+
@funindex currentBarNumber
Bar numbers are typeset by default at the start of every line except
@snippets
-@funindex barNumberVisibility
@cindex bar numbers, regular spacing
+@funindex barNumberVisibility
+
Bar numbers can be typeset at regular intervals instead of just at
the beginning of every line. To do this the default behavior
must be overridden to permit bar numbers to be printed at places
Internals Reference: @rinternals{BarNumber}.
+@cindex bar number collision
+@cindex collision, bar number
+
@knownissues
Bar numbers may collide with the top of the
@unnumberedsubsubsec Bar and bar number checks
@cindex bar check
+@cindex bar number check
+@cindex measure check
+@cindex measure number check
+
@funindex barCheckSynchronize
@funindex |
-Bar checks help detect errors in the entered durations.
-A bar check may be entered using the bar symbol, @code{|},
-at any place where a bar line is expected to fall.
-If bar check lines are encountered at other places,
-a list of warnings is printed in the log file,
-showing the line numbers and lines
-in which the bar checks failed. In the next
-example, the second bar check will signal an error.
+Bar checks help detect errors in the entered durations. A bar check
+may be entered using the bar symbol, @code{|}, at any place where a
+bar line is expected to fall. If bar check lines are encountered at
+other places, a list of warnings is printed in the log file, showing
+the line numbers and lines in which the bar checks failed. In the
+next example, the second bar check will signal an error.
@example
\time 3/4 c2 e4 | g2 |
}
@end lilypond
+@funindex \barNumberCheck
+@funindex barNumberCheck
+
When copying large pieces of music, it can be helpful to check
that the LilyPond bar number corresponds to the original that you
are entering from. This can be checked with
@cindex rehearsal marks
@cindex mark, rehearsal
+
@funindex \mark
+@funindex mark
To print a rehearsal mark, use the @code{\mark} command
@cindex format, rehearsal mark
@cindex mark, rehearsal, style
@cindex mark, rehearsal, format
+@cindex rehearsal mark, manual
+@cindex mark, rehearsal, manual
+@cindex custom rehearsal mark
The style is defined by the property @code{markFormatter}. It is
a function taking the current mark (an integer) and the current
@cindex segno
@cindex coda
@cindex D.S al Fine
+@cindex fermata
+@cindex music glyphs
+@cindex glyphs, music
+
+@funindex \musicglyph
+@funindex musicglyph
Music glyphs (such as the segno sign) may be printed inside a
@code{\mark}
@node Grace notes
@unnumberedsubsubsec Grace notes
-@funindex \grace
@cindex ornaments
@cindex grace notes
@cindex appoggiatura
@cindex acciaccatura
+@funindex \grace
+@funindex grace
+
Grace notes are ornaments that are written out. Grace notes
are printed in a smaller font and take up no logical time
in a measure.
\new Staff { c2 \grace { g8[ b] } c2 } >>
@end lilypond
-@funindex \afterGrace
-
@cindex grace notes, following
+@funindex \afterGrace
+@funindex afterGrace
+
If you want to end a note with a grace, use the @code{\afterGrace}
command. It takes two arguments: the main note, and the grace
notes following the main note.
depth = .
-SUBDIRS = buildscripts python scripts \
+SUBDIRS = python scripts \
flower lily \
mf ly \
tex ps scm \
WEB_TARGETS = offline
WWW-post:
-# need UTF8 setting in case this is hosted on a website.
+# need UTF8 setting in case this is hosted on a website.
echo -e 'AddDefaultCharset utf-8\nAddCharset utf-8 .html\nAddCharset utf-8 .en\nAddCharset utf-8 .nl\nAddCharset utf-8 .txt\n' > $(top-build-dir)/.htaccess
- $(PYTHON) $(buildscript-dir)/mutopia-index.py -o $(outdir)/examples.html input/
+ $(buildscript-dir)/mutopia-index -o $(outdir)/examples.html input/
find $(outdir) -name '*-root' | xargs rm -rf
- $(PYTHON) $(buildscript-dir)/www_post.py $(PACKAGE_NAME) $(TOPLEVEL_VERSION) $(outdir) "$(WEB_TARGETS)"
+ $(buildscript-dir)/www_post $(PACKAGE_NAME) $(TOPLEVEL_VERSION) $(outdir) "$(WEB_TARGETS)"
find $(outdir)/offline-root -type l -delete
@false
grand-replace:
- PATH=$(buildscript-dir)/$(outbase):$(PATH) $(BASH) $(buildscript-dir)/grand-replace.sh
+ $(MAKE) -C scripts/build
+ PATH=$(buildscript-dir):$(PATH) $(buildscript-dir)/grand-replace
################################################################
# testing
local-check: test
rm -rf $(RESULT_DIR)
mkdir -p $(RESULT_DIR)
- $(PYTHON) $(buildscript-dir)/output-distance.py --create-images --output-dir $(RESULT_DIR) input/regression/out-test-baseline input/regression/out-test/
+ $(buildscript-dir)/output-distance --create-images --output-dir $(RESULT_DIR) input/regression/out-test-baseline input/regression/out-test/
@find input ly -name '*.ly' -print |grep -v 'out.*/' | xargs grep '\\version' -L | grep -v "standard input" |sed 's/^/**** Missing version: /g'
user/ User manuals
po/ Translated manual node names
fr/ es/ de/ Docs translated to French, Spanish, German, resp.
- buildscripts/ Scripts for the build process
elisp/ Emacs LilyPond mode and syntax coloring
flower/ A simple C++ library
input/ Music input examples
po/ Translations for binaries and end-user scripts
ps/ PostScript library files
python/ Python modules, MIDI module
+ aux/ Python modules used by maintenance scripts
+ or in the build process
scm/ Scheme sources for LilyPond and subroutine files
scripts/ End-user scripts
+ aux/ Scripts for maintaining the sources and scripts
+ for the build process that need not be built
+ build/ Scripts for the build process that must be built
stepmake/ Generic make subroutine files
tex/ TeX and texinfo library files
vim/ Vi(M) LilyPond mode and syntax coloring
+++ /dev/null
-# -*-python-*-
-
-'''
-Experimental scons (www.scons.org) building.
-
-Usage
-
- scons TARGET
-
-build from source directory ./TARGET (not recursive)
-
-Configure, build
-
- scons [config] # configure
- scons # build all
-
-Run from build tree
-
- run=$(pwd)/out-scons/usr
- export LOCALE=$run/share/locale
- export TEXMF='{'$run/share/lilypond,$(kpsexpand '$TEXMF')'}'
- PATH=$run/bin:$PATH
-
- #optionally, if you do not use custom.py below
- #export LILYPOND_DATADIR=$run/share/lilypond/<VERSION>
-
- lilypond input/simple
-
-Other targets
- scons mf-essential # build minimal mf stuff
-
- scons doc # build web doc
- scons config # reconfigure
- scons install # install
- scons -c # clean
- scons -h # help
-
- scons / # build *everything* (including installation)
-
-Options (see scons -h)
- scons build=DIR # clean srcdir build, output below DIR
- scons out=DIR # write output for alterative config to DIR
-
-Debugging
- scons --debug=dtree
- scons --debug=explain
- scons verbose=1
-
-Optional custom.py
-
-import os
-out='out-scons'
-optimising=0
-debugging=1
-gui=1
-os.path.join (os.getcwd (), '=install')
-prefix=os.path.join (os.environ['HOME'], 'usr', 'pkg', 'lilypond')
-
-'''
-
-
-# TODO:
-
-# * reality check:
-# - too many stages in Environments setup
-# (see also buildscripts/builders.py)
-# - Home-brew scons.cach configuration caching
-# - Home-brew source tarball generating -- [why] isn't that in SCons?
-
-# * usability and documentation for "./configure; make" users
-
-# * too much cruft in toplevel SConstruct
-
-# * (optional) operation without CVS directories, from tarball
-
-# * more program configure tests, actually use full executable name
-
-# * install doc
-
-# * split doc target: doc input examples mutopia?
-
-# * grep FIXME $(find . -name 'S*t')
-
-# * drop "fast"
-
-import re
-import glob
-import os
-import string
-import sys
-import stat
-import shutil
-
-# duh, we need 0.95.1
-EnsureSConsVersion (0, 96, 92)
-
-usage = r'''Usage:
-[ENVVAR=VALUE]... scons [OPTION=VALUE]... [TARGET|DIR]...
-
-TARGETS: clean, config, doc, dist, install, mf-essential, po-update,
- realclean, release, sconsclean, tar, TAGS
-
-ENVVARS: BASH, CCFLAGS, CC, CXX, LIBS, PYTHON, SH...
- (see SConstruct:config_vars)
-
-OPTIONS:
-'''
-
-
-config_cache = 'scons.cache'
-if os.path.exists (config_cache) and 'config' in COMMAND_LINE_TARGETS:
- os.unlink (config_cache)
-
-# All config_vars can be set as ENVVAR, eg:
-#
-# CXX=g++-4.0 GS=~/usr/pkg/gs/bin/gs scons config
-#
-# append test_program variables automagically?
-config_vars = [
- 'BASH',
- 'BYTEORDER',
- 'CC',
- 'CCFLAGS',
- 'CPPPATH',
- 'CPPDEFINES',
- 'CXX',
- 'CXXFLAGS',
- 'DEFINES',
- 'DVIPS',
- 'FONTFORGE',
- 'GCC',
- 'GXX',
- 'GS',
- 'LIBS',
- 'LINKFLAGS',
- 'MF',
- 'PERL',
- 'PYTHON',
- 'SH',
- ]
-
-# Put your favourite stuff in custom.py
-opts = Options ([config_cache, 'custom.py'], ARGUMENTS)
-opts.Add ('prefix', 'Install prefix', '/usr/')
-opts.Add ('out', 'Output directory', 'out-scons')
-opts.Add ('build', 'Build directory', '.')
-opts.Add ('DESTDIR', 'DESTDIR prepended to prefix', '')
-opts.AddOptions (
- BoolOption ('warnings', 'compile with -Wall and similiar',
- 1),
- BoolOption ('debugging', 'compile with debugging symbols',
- 0),
- BoolOption ('optimising', 'compile with optimising',
- 1),
- BoolOption ('shared', 'build shared libraries',
- 0),
- BoolOption ('static', 'build static libraries',
- 1),
- BoolOption ('gui', 'build with GNOME backend (EXPERIMENTAL)',
- 0),
- BoolOption ('verbose', 'run commands with verbose flag',
- 0),
- BoolOption ('checksums', 'use checksums instead of timestamps',
- 0),
- BoolOption ('fast', 'use timestamps, implicit cache, prune CPPPATH',
- 0),
- )
-
-srcdir = Dir ('.').srcnode ().abspath
-#ugh
-sys.path.append (os.path.join (srcdir, 'stepmake', 'bin'))
-
-try:
- import packagepython
- packagepython.Package (srcdir)
- packagepython.version_tuple_to_str (package.version)
-except:
- print '*** FIXME: no packagepython. setting version to 1.0'
- class Package:
- name = 'lilypond'
- release_dir = '.'
- package = Package
- version = '1.0'
-
-ENV = { 'PYTHONPATH': '' }
-for key in ['GUILE_LOAD_PATH', 'LD_LIBRARY_PATH', 'PATH', 'PKG_CONFIG_PATH',
- 'PYTHONPATH', 'TEXMF']:
- if os.environ.has_key (key):
- ENV[key] = os.environ[key]
-
-ENV['PYTHONPATH'] = os.path.join (srcdir, 'python') + ':' + ENV['PYTHONPATH']
-
-env = Environment (
- ENV = ENV,
- BYTEORDER = sys.byteorder.upper (),
- CC = '$GCC',
- CXX = '$GXX',
- CPPDEFINES = '-DHAVE_CONFIG_H',
- MAKEINFO = 'LANG= makeinfo',
- MF_TO_TABLE_PY = srcdir + '/buildscripts/mf-to-table.py',
-
- PKG_CONFIG_PATH = [os.path.join (os.environ['HOME'],
- 'usr/pkg/gnome/lib'),
- os.path.join (os.environ['HOME'],
- 'usr/pkg/pango/lib')],
- GZIP='-9v',
- MFMODE = 'ljfour',
- TOPLEVEL_VERSION = version,
- )
-
-Help (usage + opts.GenerateHelpText (env))
-
-# Add all config_vars to opts, so that they will be read and saved
-# together with the other configure options.
-map (lambda x: opts.AddOptions ((x,)), config_vars)
-opts.Update (env)
-
-for key in config_vars:
- if os.environ.has_key (key):
- env[key] = os.environ[key]
-
-if env['fast']:
- # Usability switch (Anthony Roach).
- # See http://www.scons.org/cgi-bin/wiki/GoFastButton
- # First do: scons realclean .
- env['checksums'] = 0
- SetOption ('max_drift', 1)
- SetOption ('implicit_cache', 1)
-elif env['checksums']:
- # Always use checksums (makes more sense than timestamps).
- SetOption ('max_drift', 0)
- # Using *content* checksums prevents rebuilds after
- # [re]configure if config.hh has not changed. Too bad that it
- # is unusably slow.
- TargetSignatures ('content')
-
-absbuild = Dir (env['build']).abspath
-outdir = os.path.join (Dir (env['build']).abspath, env['out'])
-run_prefix = os.path.join (absbuild, os.path.join (env['out'], 'usr'))
-
-
-config_hh = os.path.join (outdir, 'config.hh')
-version_hh = os.path.join (outdir, 'version.hh')
-
-env.Alias ('config', config_cache)
-
-cachedir = os.path.join (outdir, 'build-cache')
-
-if not os.path.exists (cachedir):
- os.makedirs (cachedir)
-
-CacheDir (cachedir)
-
-# No need to set $LILYPOND_DATADIR to run lily, but cannot install...
-if env['debugging'] and not 'install' in COMMAND_LINE_TARGETS:
- env['prefix'] = run_prefix
-
-prefix = env['prefix']
-bindir = os.path.join (prefix, 'bin')
-sharedir = os.path.join (prefix, 'share')
-libdir = os.path.join (prefix, 'lib')
-libdir_package = os.path.join (libdir, package.name)
-libdir_package_version = os.path.join (libdir_package, version)
-localedir = os.path.join (sharedir, 'locale')
-sharedir_doc_package = os.path.join (sharedir, 'doc', package.name)
-sharedir_package = os.path.join (sharedir, package.name)
-sharedir_package_version = os.path.join (sharedir_package, version)
-lilypondprefix = sharedir_package_version
-
-# junkme
-env.Append (
- absbuild = absbuild,
- srcdir = srcdir,
- )
-
-
-
-def symlink_tree (target, source, env):
- def mkdirs (dir):
- def mkdir (dir):
- if not dir:
- os.chdir (os.sep)
- return
- if not os.path.isdir (dir):
- if os.path.exists (dir):
- os.unlink (dir)
- os.mkdir (dir)
- os.chdir (dir)
- map (mkdir, string.split (dir, os.sep))
- def symlink (src, dst):
- os.chdir (absbuild)
- dir = os.path.dirname (dst)
- mkdirs (dir)
- if src[0] == '#':
- frm = os.path.join (srcdir, src[1:])
- else:
- depth = len (string.split (dir, '/'))
- if src.find ('@') > -1:
- frm = os.path.join ('../' * depth,
- string.replace (src, '@',
- env['out']))
- else:
- frm = os.path.join ('../' * depth, src,
- env['out'])
- if src[-1] == '/':
- frm = os.path.join (frm, os.path.basename (dst))
- if env['verbose']:
- print 'ln -s %s -> %s' % (frm, os.path.basename (dst))
- os.symlink (frm, os.path.basename (dst))
- shutil.rmtree (run_prefix)
- prefix = os.path.join (env['out'], 'usr')
- map (lambda x: symlink (x[0], os.path.join (prefix,
- x[1] % {'ver' : version})),
- # ^# := source dir
- # @ := out
- # /$ := add dst file_name
- (('python', 'lib/lilypond/python'),
- # ugh
- ('python', 'share/lilypond/%(ver)s/python'),
- ('lily/', 'bin/lilypond'),
- ('scripts/', 'bin/convert-ly'),
- ('scripts/', 'bin/lilypond-book'),
- ('scripts/', 'bin/ps2png'),
- ('mf', 'share/lilypond/%(ver)s/dvips/mf-out'),
- ('#ps/music-drawing-routines.ps',
- 'share/lilypond/%(ver)s/tex/music-drawing-routines.ps'),
- ('mf', 'share/lilypond/%(ver)s/otf'),
- ('mf', 'share/lilypond/%(ver)s/tfm'),
- ('tex', 'share/lilypond/%(ver)s/tex/enc'),
- ('#mf', 'share/lilypond/%(ver)s/fonts/mf'),
- ('mf', 'share/lilypond/%(ver)s/fonts/map'),
- ('mf', 'share/lilypond/%(ver)s/fonts/otf'),
- ('mf', 'share/lilypond/%(ver)s/fonts/tfm'),
- ('mf', 'share/lilypond/%(ver)s/fonts/type1'),
- ('#tex', 'share/lilypond/%(ver)s/tex/source'),
- ('tex', 'share/lilypond/%(ver)s/tex/tex-out'),
- ('mf', 'share/lilypond/%(ver)s/tex/mf-out'),
- ('#ly', 'share/lilypond/%(ver)s/ly'),
- ('#scm', 'share/lilypond/%(ver)s/scm'),
- ('#scripts', 'share/lilypond/%(ver)s/scripts'),
- ('#ps', 'share/lilypond/%(ver)s/ps'),
- ('po/@/nl.mo', 'share/locale/nl/LC_MESSAGES/lilypond.mo'),
- ('elisp', 'share/lilypond/%(ver)s/elisp')))
-
- print "FIXME: BARF BARF BARF"
- os.chdir (absbuild)
- out = env['out']
- ver = version
- prefix = os.path.join (env['out'], 'usr/share/lilypond/%(ver)s/fonts'
- % vars ())
- for ext in ('enc', 'map', 'otf', 'svg', 'tfm', 'pfa'):
- dir = os.path.join (absbuild, prefix, ext)
- os.system ('rm -f ' + dir)
- mkdirs (dir)
- os.chdir (dir)
- os.system ('ln -s ../../../../../../../mf/%(out)s/*.%(ext)s .'
- % vars ())
- os.chdir (srcdir)
-
-def configure (target, source, env):
- dre = re.compile ('\n(200[0-9]{5})')
- vre = re.compile ('.*?\n[^-.0-9]*([0-9][0-9]*\.[0-9]([.0-9]*[0-9])*)',
- re.DOTALL)
- def get_version (program):
- command = '(pkg-config --modversion %(program)s || %(program)s --version || %(program)s -V) 2>&1' % vars ()
- pipe = os.popen (command)
- output = pipe.read ()
- if pipe.close ():
- return None
- splits = re.sub ('^|\s', '\n', output)
- date_hack = re.sub (dre, '\n0.0.\\1', splits)
- m = re.match (vre, date_hack)
- v = m.group (1)
- if v[-1] == '\n':
- v = v[:-1]
- return string.split (v, '.')
-
- def test_version (lst, full_name, minimal, description, package):
- program = os.path.basename (full_name)
- sys.stdout.write ('Checking %s version... ' % program)
- actual = get_version (program)
- if not actual:
- print 'not found'
- lst.append ((description, package, minimal, program,
- 'not installed'))
- return 0
- print string.join (actual, '.')
- if map (string.atoi, actual) \
- < map (string.atoi, string.split (minimal, '.')):
- lst.append ((description, package, minimal, program,
- string.join (actual, '.')))
- return 0
- return 1
-
- def test_program (lst, program, minimal, description, package):
- key = program.upper ()
- if key.find ('+-'):
- key = re.sub ('\+', 'X', key)
- key = re.sub ('-', '_', key)
- sys.stdout.write ('Checking for %s ... ' % program)
- if env.has_key (key):
- f = env[key]
- sys.stdout.write ('(cached) ')
- else:
- f = WhereIs (program)
- env[key] = f
- if not f:
- print 'not found'
- lst.append ((description, package, minimal, program,
- 'not installed'))
- return 0
- print f
- return test_version (lst, program, minimal, description, package)
-
- def test_lib (lst, program, minimal, description, package):
- # FIXME: test for Debian or RPM (or -foo?) based dists
- # to guess (or get correct!: apt-cache search?)
- # package name.
- #if os.system ('pkg-config --atleast-version=0 freetype2'):
- # barf
- if test_version (lst, program, minimal, description,
- 'lib%(package)s-dev or %(package)s-devel'
- % vars ()):
- env.ParseConfig ('pkg-config --cflags --libs %(program)s'
- % vars ())
- return 1
- return 0
-
- required = []
- test_program (required, 'bash', '2.0', 'Bash', 'bash')
- test_program (required, 'gcc', '4.0', 'GNU C compiler', 'gcc')
- test_program (required, 'g++', '4.0.5', 'GNU C++ compiler', 'g++')
- test_program (required, 'guile-config', '1.8', 'GUILE development',
- 'libguile-dev or guile-devel')
- test_program (required, 'mf', '0.0', 'Metafont', 'tetex-bin')
- test_program (required, 'python', '2.1', 'Python (www.python.org)',
- 'python')
- # Silly, and breaks with /bin/sh == dash
- #test_program (required, 'sh', '0.0', 'Bourne shell', 'sh')
-
- optional = []
- # Do not use bison 1.50 and 1.75.
- #test_program (optional, 'foo', '2.0', 'Foomatic tester', 'bar')
- test_program (optional, 'bison', '1.25', 'Bison -- parser generator',
- 'bison')
- test_program (optional, 'fontforge', '0.0.20050624', 'FontForge',
- 'fontforge')
- test_program (optional, 'flex', '0.0', 'Flex -- lexer generator',
- 'flex')
- test_program (optional, 'guile', '1.8', 'GUILE scheme', 'guile')
- test_program (optional, 'gs', '8.15',
- 'Ghostscript PostScript interpreter',
- 'gs or gs-afpl or gs-esp or gs-gpl')
- test_program (optional, 'makeinfo', '4.8', 'Makeinfo tool', 'texinfo')
- test_program (optional, 'perl', '4.0',
- 'Perl practical efficient readonly language', 'perl')
-
- def CheckYYCurrentBuffer (context):
- context.Message ('Checking for yy_current_buffer... ')
- ret = conf.TryCompile ("""using namespace std;
- #include <FlexLexer.h>
- class yy_flex_lexer: public yyFlexLexer
- {
- public:
- yy_flex_lexer ()
- {
- yy_current_buffer = 0;
- }
- };""", '.cc')
- context.Result (ret)
- return ret
-
- conf = Configure (env, custom_tests = { 'CheckYYCurrentBuffer'
- : CheckYYCurrentBuffer })
-
- defines = {
- 'DIRSEP' : "'%s'" % os.sep,
- 'PATHSEP' : "'%s'" % os.pathsep,
- 'PACKAGE': '"%s"' % package.name,
- 'DATADIR' : '"%s"' % sharedir,
- 'PACKAGE_DATADIR' : '"%s"' % sharedir_package,
- 'LOCALEDIR' : '"%s"' %localedir,
- }
- conf.env.Append (DEFINES = defines)
-
- command = r"""python -c 'import sys; sys.stdout.write ("%s/include/python%s" % (sys.prefix, sys.version[:3]))'""" #"
- PYTHON_INCLUDE = os.popen (command).read ()#[:-1]
- if env['fast']:
- env.Append (CCFLAGS = ['-I%s' % PYTHON_INCLUDE])
- else:
- env.Append (CPPPATH = [PYTHON_INCLUDE])
-
- headers = ('assert.h', 'grp.h', 'libio.h', 'pwd.h',
- 'sys/stat.h', 'utf8/wchar.h', 'wchar.h', 'Python.h')
- for i in headers:
- if conf.CheckCHeader (i):
- key = re.sub ('[./]', '_', 'HAVE_' + string.upper (i))
- conf.env['DEFINES'][key] = 1
-
- ccheaders = ('sstream',)
- for i in ccheaders:
- if conf.CheckCXXHeader (i):
- key = re.sub ('[./]', '_', 'HAVE_' + string.upper (i))
- conf.env['DEFINES'][key] = 1
-
- functions = ('chroot', 'fopencookie', 'funopen',
- 'gettext', 'isinf',
- 'mbrtowc', 'memmem', 'snprintf', 'vsnprintf', 'wcrtomb')
- for i in functions:
- if 0 or conf.CheckFunc (i):
- key = re.sub ('[./]', '_', 'HAVE_' + string.upper (i))
- conf.env['DEFINES'][key] = 1
-
- if conf.CheckYYCurrentBuffer ():
- conf.env['DEFINES']['HAVE_FLEXLEXER_YY_CURRENT_BUFFER'] = 1
-
- if conf.CheckLib ('dl'):
- pass
-
- if env['fast']:
- cpppath = []
- if env.has_key ('CPPPATH'):
- cpppath = env['CPPPATH']
-
- ## FIXME: linkage, check for libguile.h and scm_boot_guile
- #this could happen after flower...
- env.ParseConfig ('guile-config compile')
-
- test_program (required, 'pkg-config', '0.9.0',
- 'pkg-config library compile manager', 'pkg-config')
- if test_lib (required, 'freetype2', '0.0',
- 'Development files for FreeType 2 font engine',
- 'freetype6'):
- conf.env['DEFINES']['HAVE_FREETYPE2'] = '1'
-
- if test_lib (required, 'pangoft2', '1.6.0',
- 'Development files for pango, with FreeType2',
- 'pango1.0'):
- conf.env['DEFINES']['HAVE_PANGO_FT2'] = '1'
-
- if test_lib (optional, 'fontconfig', '2.2.0',
- 'Development files for fontconfig', 'fontconfig1'):
- conf.env['DEFINES']['HAVE_FONTCONFIG'] = '1'
-
- #this could happen only for compiling pango-*
- if env['gui']:
- test_lib (required, 'gtk+-2.0', '2.4.0',
- 'Development files for GTK+', 'gtk2.0')
-
- if env['fast']:
- # Using CCFLAGS = -I<system-dir> rather than CPPPATH = [
- # <system-dir>] speeds up SCons
- env['CCFLAGS'] += map (lambda x: '-I' + x,
- env['CPPPATH'][len (cpppath):])
- env['CPPPATH'] = cpppath
-
- if required:
- print
- print '********************************'
- print 'Please install required packages'
- for i in required:
- print '%s: %s-%s or newer (found: %s %s)' % i
- Exit (1)
-
- if optional:
- print
- print '*************************************'
- print 'Consider installing optional packages'
- for i in optional:
- print '%s: %s-%s or newer (found: %s %s)' % i
-
- return conf.Finish ()
-
-def config_header (target, source, env):
- config = open (str (target[0]), 'w')
- for i in sorted (env['DEFINES'].keys ()):
- config.write ('#define %s %s\n' % (i, env['DEFINES'][i]))
- config.close ()
-env.Command (config_hh, config_cache, config_header)
-
-# hmm?
-def xuniquify (lst):
- n = []
- for i in lst:
- if not i in n:
- n.append (i)
- lst = n
- return lst
-
-def uniquify (lst):
- d = {}
- n = len (lst)
- i = 0
- while i < n:
- if not d.has_key (lst[i]):
- d[lst[i]] = 1
- i += 1
- else:
- del lst[i]
- n -= 1
- return lst
-
-def uniquify_config_vars (env):
- for i in config_vars:
- if env.has_key (i) and type (env[i]) == type ([]):
- env[i] = uniquify (env[i])
-
-def save_config_cache (env):
- ## FIXME: Is this smart, using option cache for saving
- ## config.cache? I cannot seem to find the official method.
- uniquify_config_vars (env)
- opts.Save (config_cache, env)
-
- if 'config' in COMMAND_LINE_TARGETS:
- sys.stdout.write ('\n')
- sys.stdout.write ('LilyPond configured')
- sys.stdout.write ('\n')
- sys.stdout.write ('Now run')
- sys.stdout.write ('\n')
- sys.stdout.write (' scons [TARGET|DIR]...')
- sys.stdout.write ('\n')
- sys.stdout.write ('\n')
- sys.stdout.write ('Examples:')
- sys.stdout.write ('\n')
- sys.stdout.write (' scons lily # build lilypond')
- sys.stdout.write ('\n')
- sys.stdout.write (' scons all # build everything')
- sys.stdout.write ('\n')
- sys.stdout.write (' scons doc # build documentation')
- sys.stdout.write ('\n')
- ## TODO
- ## sys.stdout.write (' scons prefix=/usr DESTDIR=/tmp/pkg all install')
- ## sys.stdout.write ('\n')
- Exit (0)
- elif not env['checksums']:
- # When using timestams, config.hh is NEW. The next
- # build triggers recompilation of everything. Exiting
- # here makes SCons use the actual timestamp for config.hh
- # and prevents recompiling everything the next run.
- command = sys.argv[0] + ' ' + string.join (COMMAND_LINE_TARGETS)
- sys.stdout.write ('Running %s ... ' % command)
- sys.stdout.write ('\n')
- s = os.system (command)
- Exit (s)
-
-# WTF?
-# scons: *** Calling Configure from Builders is not supported.
-# env.Command (config_cache, None, configure)
-if not os.path.exists (config_cache) \
- or (os.stat ('SConstruct')[stat.ST_MTIME]
- > os.stat (config_cache)[stat.ST_MTIME]):
- env = configure (None, None, env)
- save_config_cache (env)
-elif env['checksums']:
- # just save everything
- save_config_cache (env)
-
-#urg how does #/ subst work?
-Export ('env')
-SConscript ('buildscripts/builder.py')
-
-env.PrependENVPath ('PATH',
- os.path.join (env['absbuild'], env['out'], 'usr/bin'))
-
-LILYPOND_DATADIR = os.path.join (run_prefix, 'share/lilypond/', version)
-
-if not os.path.exists (LILYPOND_DATADIR):
- os.makedirs (LILYPOND_DATADIR)
-
-env.Command (LILYPOND_DATADIR, ['#/SConstruct', '#/VERSION'], symlink_tree)
-env.Depends ('lily', LILYPOND_DATADIR)
-
-env.Append (ENV = {
- 'LILYPOND_DATADIR' : LILYPOND_DATADIR,
- 'TEXMF' : '{$LILYPOND_DATADIR,'
- + os.popen ('kpsexpand \$TEXMF').read ()[:-1] + '}',
- })
-
-BUILD_ABC2LY = '${set__x}$PYTHON $srcdir/scripts/abc2ly.py'
-BUILD_LILYPOND = '$absbuild/lily/$out/lilypond ${__verbose}'
-BUILD_LILYPOND_BOOK = '$PYTHON $srcdir/scripts/lilypond-book.py ${__verbose}'
-
-if env['verbose'] and env['verbose'] != '0':
- env['__verbose'] = ' --verbose'
- env['set__x'] = 'set -x;'
-
-# post-option environment-update
-env.Append (
- bindir = bindir,
- sharedir = sharedir,
- lilypond_datadir = sharedir_package,
- localedir = localedir,
- local_lilypond_datadir = sharedir_package_version,
- lilypondprefix = lilypondprefix,
- sharedir_package = sharedir_package,
- sharedir_doc_package = sharedir_doc_package,
- sharedir_package_version = sharedir_package_version,
- libdir_package = libdir_package,
- libdir_package_version = libdir_package_version,
-
- LILYPOND = BUILD_LILYPOND,
- ABC2LY = BUILD_ABC2LY,
- LILYPOND_BOOK = BUILD_LILYPOND_BOOK,
- LILYPOND_BOOK_FORMAT = 'texi-html',
- MAKEINFO_FLAGS = '--css-include=$srcdir/Documentation/texinfo.css',
- )
-
-env.Append (CCFLAGS = ['-pipe', '-Wno-pmf-conversions'])
-if env['debugging']:
- env.Append (CCFLAGS = ['-g'])
-if env['optimising']:
- env.Append (CCFLAGS = '-O2')
-if env['warnings']:
- env.Append (CCFLAGS = ['-W', '-Wall'])
- env.Append (CXXFLAGS = ['-Wconversion'])
-
-# ugr,huh?
-env.Append (LINKFLAGS = ['-Wl,--export-dynamic'])
-# FIXME: ParseConfig ignores -L flag?
-env.Append (LINKFLAGS = ['-L/usr/X11R6/lib'])
-
-## Explicit target and dependencies
-
-if 'clean' in COMMAND_LINE_TARGETS:
- # ugh: prevent reconfigure instead of clean
- os.system ('touch %s' % config_cache)
-
- command = sys.argv[0] + ' -c .'
- sys.stdout.write ('Running %s ... ' % command)
- sys.stdout.write ('\n')
- s = os.system (command)
- if os.path.exists (config_cache):
- os.unlink (config_cache)
- Exit (s)
-
-if 'sconsclean' in COMMAND_LINE_TARGETS:
- command = 'rm -rf scons.cache $(find . -name ".scon*")'
- s = os.system (command)
- if os.path.exists (config_cache):
- os.unlink (config_cache)
- Exit (s)
-
-if 'realclean' in COMMAND_LINE_TARGETS:
- command = 'rm -rf $(find . -name "out-scons" -o -name ".scon*")'
- sys.stdout.write ('Running %s ... ' % command)
- sys.stdout.write ('\n')
- s = os.system (command)
- if os.path.exists (config_cache):
- os.unlink (config_cache)
- Exit (s)
-
-# Declare SConscript phonies
-env.Alias ('minimal', config_cache)
-
-if 0:
- env.Alias ('mf-essential', config_cache)
- env.Alias ('minimal', ['python', 'lily', 'mf-essential'])
- env.Alias ('all', ['minimal', 'mf', '.'])
-
-else:
- env.Alias ('minimal', ['python', 'lily', 'mf'])
- env.Alias ('all', ['minimal', '.'])
-
-
-# Do we want the doc/web separation?
-env.Alias ('doc',
- ['minimal',
- 'Documentation',
- 'Documentation/user',
- 'Documentation/topdocs',
- 'Documentation/bibliography',
- 'input'])
-
-# Without target arguments, do minimal build
-if not COMMAND_LINE_TARGETS:
- env.Default (['minimal'])
-
-# GNU Make rerouting compat:
-env.Alias ('web', 'doc')
-
-
-env.Command (version_hh, '#/VERSION',
- '$PYTHON ./stepmake/bin/make-version.py VERSION > $TARGET')
-
-# post-config environment update
-env.Append (
- run_prefix = run_prefix,
- LILYPOND_DATADIR = LILYPOND_DATADIR,
-
- # FIXME: move to lily/SConscript?
- LIBPATH = [os.path.join (absbuild, 'flower', env['out'])],
- CPPPATH = [outdir, ],
- LILYPOND_PATH = ['.',
- '$srcdir/input',
- '$srcdir/input/regression',
- '$srcdir/input/test',
- '$srcdir/input/tutorial',
- '$srcdir/Documentation/user',
- '$absbuild/mf/$out',
-# os.path.join (absbuild, 'Documentation',
-# env['out']),
-# os.path.join (absbuild, 'Documentation/user',
-# env['out']),
- ],
- MAKEINFO_PATH = ['.', '$srcdir/Documentation/user',
- '$absbuild/Documentation/user/$out'],
- )
-
-#### dist, tar
-def plus (a, b):
- a + b
-
-def cvs_entry_is_dir (line):
- return line[0] == 'D' and line[-2] == '/'
-
-def cvs_entry_is_file (line):
- return line[0] == '/' and line[-2] == '/'
-
-def cvs_dirs (dir):
- entries = os.path.join (dir, 'CVS/Entries')
- if not os.path.exists (entries):
- return []
- entries = open (entries).readlines ()
- dir_entries = filter (cvs_entry_is_dir, entries)
- dirs = map (lambda x: os.path.join (dir, x[2:x[2:].index ('/')+3]),
- dir_entries)
- return dirs + map (cvs_dirs, dirs)
-
-def cvs_files (dir):
- entries = os.path.join (dir, 'CVS/Entries')
- if not os.path.exists (entries):
- return []
- entries = open (entries).readlines ()
- file_entries = filter (cvs_entry_is_file, entries)
- files = map (lambda x: x[1:x[1:].index ('/')+1], file_entries)
- return map (lambda x: os.path.join (dir, x), files)
-
-def flatten (tree, lst):
- if type (tree) == type ([]):
- for i in tree:
- if type (i) == type ([]):
- flatten (i, lst)
- else:
- lst.append (i)
- return lst
-
-if os.path.isdir ('%(srcdir)s/CVS' % vars ()):
- subdirs = flatten (cvs_dirs ('.'), [])
-else:
- # ugh
- command = 'cd %(srcdir)s \
- && find . -name SConscript | sed s@/SConscript@@' % vars ()
- subdirs = string.split (os.popen (command).read ())
-
-if env['fast']\
- and 'all' not in COMMAND_LINE_TARGETS\
- and 'doc' not in COMMAND_LINE_TARGETS\
- and 'web' not in COMMAND_LINE_TARGETS\
- and 'install' not in COMMAND_LINE_TARGETS\
- and 'clean' not in COMMAND_LINE_TARGETS:
- subdirs = [ 'python',
- 'lily',
- 'flower',
- 'mf',
- ]
-
-if os.path.isdir ('%(srcdir)s/CVS' % vars ()):
- src_files = reduce (lambda x, y: x + y, map (cvs_files, subdirs))
-else:
- src_files = ['foobar']
-
-readme_files = ['AUTHORS', 'README', 'INSTALL', 'NEWS']
-txt_files = map (lambda x: x + '.txt', readme_files)
-
-
-#
-# speeds up build by +- 5%
-#
-if not env['fast']:
- foo = map (lambda x: env.TXT (x + '.txt',
- os.path.join ('Documentation/topdocs', x)),
- readme_files)
- tar_base = package.name + '-' + version
- tar_name = tar_base + '.tar.gz'
- ball_prefix = os.path.join (outdir, tar_base)
- tar_ball = os.path.join (outdir, tar_name)
-
- dist_files = src_files + txt_files
- ball_files = map (lambda x: os.path.join (ball_prefix, x), dist_files)
- map (lambda x: env.Depends (tar_ball, x), ball_files)
- map (lambda x: env.Command (os.path.join (ball_prefix, x), x,
- 'ln $SOURCE $TARGET'), dist_files)
- tar = env.Command (tar_ball, src_files,
- ['rm -f $$(find $TARGET.dir -name .sconsign)',
- 'tar czf $TARGET -C $TARGET.dir %s' % tar_base,])
- env.Alias ('tar', tar)
-
- dist_ball = os.path.join (package.release_dir, tar_name)
- env.Command (dist_ball, tar_ball,
- 'if [ -e $SOURCE -a -e $TARGET ]; then rm $TARGET; fi;' \
- + 'ln $SOURCE $TARGET')
- env.Depends ('dist', dist_ball)
- patch_name = os.path.join (outdir, tar_base + '.diff.gz')
- patch = env.PATCH (patch_name, tar_ball)
- env.Depends (patch_name, dist_ball)
- env.Alias ('release', patch)
-
-#### web
-if not env['fast']:
- web_base = os.path.join (outdir, 'web')
- web_ball = web_base + '.tar.gz'
- env['footify'] = 'MAILADDRESS=bug-lilypond@gnu.org $PYTHON stepmake/bin/add-html-footer.py --name=lilypond --version=$TOPLEVEL_VERSION'
- web_ext = ['.html', '.ly', '.midi', '.pdf', '.png', '.ps.gz', '.txt',]
- web_path = '-path "*/$out/*"' + string.join (web_ext, ' -or -path "*/$out/*"') + '-or -type l'
- env['web_path'] = web_path
- web_list = os.path.join (outdir, 'weblist')
- # compatible make heritits
- # fixme: generate in $outdir is cwd/builddir
- env.Command (web_list,
- ## Adding 'doc' dependency is correct, but takes
- ## > 5min extra if you have a peder :-)
- #'doc',
-
- '#/VERSION',
- ['$PYTHON buildscripts/mutopia-index.py -o examples.html ./',
- 'cd $absbuild && $footify $$(find . -name "*.html" -print)',
- 'cd $absbuild && rm -f $$(find . -name "*.html~" -print)',
- 'cd $absbuild && find Documentation input $web_path \
- > $TARGET',
- '''echo '<META HTTP-EQUIV="refresh" content="0;URL=Documentation/out-www/index.html">' > $absbuild/index.html''',
- '''echo '<html><body>Redirecting to the documentation index...</body></html>' >> $absbuild/index.html''',
- 'cd $absbuild && ls *.html >> $TARGET',])
- env.Command (web_ball, web_list,
- ['cat $SOURCE | tar -C $absbuild -czf $TARGET -T -',])
- #env.Alias ('web', web_ball)
- www_base = os.path.join (outdir, 'www')
- www_ball = www_base + '.tar.gz'
- env.Command (www_ball, web_ball,
- ['rm -rf $out/tmp',
- 'mkdir -p $absbuild/$out/tmp',
- 'tar -C $absbuild/$out/tmp -xzf $SOURCE',
- 'cd $absbuild/$out/tmp && for i in $$(find . -name "$out"); '
- + ' do mv $$i $$(dirname $$i)/out-www; done',
- 'tar -C $absbuild/$out/tmp -czf $TARGET .'])
- env.Alias ('web', www_ball)
-
-#### tags
-env.Append (
- ETAGSFLAGS = """--regex='{c++}/^LY_DEFINE *(\([^,]+\)/\\1/' \
- --regex='{c++}/^LY_DEFINE *([^"]*"\([^"]+\)"/\\1/'""")
-code_ext = ['.cc', '.hh', '.scm', '.tcc',]
-env.Command ('TAGS', filter (lambda x: os.path.splitext (x)[1] in code_ext,
- src_files),
- 'etags $ETAGSFLAGS $SOURCES')
-
-# Note: SConscripts are only needed in directories where something needs
-# to be done, building or installing
-for d in subdirs:
- if os.path.exists (os.path.join (d, 'SConscript')):
- b = os.path.join (env['build'], d, env['out'])
- # Support clean sourcetree build (--srcdir build)
- # and ./out build.
- if os.path.abspath (b) != os.path.abspath (d):
- env.BuildDir (b, d, duplicate = 0)
- SConscript (os.path.join (b, 'SConscript'))
-
-env.Command ('tree', ['#/VERSION', '#/SConstruct'], symlink_tree)
-Release 2.11
+Release 2.12
************
Maarten Hijzelendoorn
Marc Lanoiselée
Mark Polesky
+Matthieu Jacquot
Matthijs Frankeno
Martijn Vromans
Marnen Laibow-Koser
+++ /dev/null
-depth = ..
-
-STEPMAKE_TEMPLATES=script install po
-EXTRA_DIST_FILES=pfx2ttf.fontforge
-
-include $(depth)/make/stepmake.make
-
-# Should we install these? This should be handled by sysadmin or
-# packager but if she forgets...
-#INSTALLATION_OUT_SUFFIXES=1
-#INSTALLATION_OUT_DIR1=$(local_lilypond_datadir)/scripts
-#INSTALLATION_OUT_FILES1=$(outdir)/lilypond-login $(outdir)/lilypond-profile
-
-all: $(INSTALLATION_FILES)
-
+++ /dev/null
-# -*-python-*-
-
-Import ('env')
-sources = ['lilypond-profile.sh', 'lilypond-login.sh']
-gens = map (env.AT_COPY, sources)
+++ /dev/null
-#!@PYTHON@
-import os
-import sys
-import getopt
-import tempfile
-
-# usage:
-def usage ():
- print 'usage: %s [-s style] [-o <outfile>] BIBFILES...'
-
-(options, files) = getopt.getopt (sys.argv[1:], 's:o:', [])
-
-output = 'bib.html'
-style = 'long'
-
-for (o,a) in options:
- if o == '-h' or o == '--help':
- usage ()
- sys.exit (0)
- elif o == '-s' or o == '--style':
- style = a
- elif o == '-o' or o == '--output':
- output = a
- else:
- raise Exception ('unknown option: %s' % o)
-
-
-if style not in ['alpha','index','long','longp','long-pario','short','short-pario','split']:
- sys.stderr.write ("Unknown style \`%s'\n" % style)
-
-tempfile = tempfile.mktemp ('bib2html')
-
-if not files:
- usage ()
- sys.exit (2)
-
-
-def strip_extension (f, ext):
- (p, e) = os.path.splitext (f)
- if e == ext:
- e = ''
- return p + e
-
-nf = []
-for f in files:
- nf.append (strip_extension (f, '.bib'))
-
-files = ','.join (nf)
-
-open (tempfile + '.aux', 'w').write (r'''
-\relax
-\citation{*}
-\bibstyle{html-%(style)s}
-\bibdata{%(files)s}''' % vars ())
-
-cmd = "bibtex %s" % tempfile
-
-sys.stdout.write ("Invoking `%s'\n" % cmd)
-stat = os.system (cmd)
-if stat <> 0:
- sys.exit(1)
-
-
-#TODO: do tex -> html on output
-
-bbl = open (tempfile + '.bbl').read ()
-
-open (output, 'w').write (bbl)
-
-
-def cleanup (tempfile):
- for a in ['aux','bbl', 'blg']:
- os.unlink (tempfile + '.' + a)
-
-cleanup (tempfile)
-
+++ /dev/null
-#!/bin/sh
-
-if test "$1" == "--fresh"; then
- fresh=yes
-fi
-
-if test ! -f config-cov.make; then
- fresh=yes
-fi
-
-if test "$fresh" = "yes";
-then
- ./configure --enable-config=cov --disable-optimising \
- && make conf=cov -j2 clean \
- && perl -i~ -pe 's/-pipe /-fprofile-arcs -ftest-coverage -pipe /g' config-cov.make \
- && perl -i~ -pe 's/ -ldl / -lgcov -ldl /g' config-cov.make
-else
- find -name '*.gcda' -exec rm '{}' ';'
-fi
-
-mkdir -p scripts/out-cov/
-touch scripts/out-cov/midi2ly scripts/out-cov/midi2ly.1
-make conf=cov -j2 && \
- make conf=cov test-clean OUT_TEST=testcov LILYPOND_JOBS= && \
- make conf=cov test OUT_TEST=testcov LILYPOND_JOBS='-dtrace-scheme-coverage '
-
-if test "$?" != "0"; then
- tail -100 out-cov/test-run.log
- exit 1
-fi
-
-depth=../..
-resultdir=out/coverage-results
-
-rm -rf $resultdir
-mkdir $resultdir
-cd $resultdir
-
-ln $depth/lily/* .
-ln $depth/scm/*.scm .
-mv $depth/input/regression/out-testcov/*.scm.cov .
-ln $depth/ly/*.ly .
-ln $depth/lily/out-cov/*[ch] .
-mkdir include
-ln $depth/lily/include/* include/
-ln $depth/flower/include/* include/
-for a in *[cl] *.yy
-do
- gcov -o $depth/lily/out-cov/ -p $a > $a.gcov-summary
-done
-
-python $depth/buildscripts/coverage.py --uncovered *.cc > uncovered.txt
-python $depth/buildscripts/coverage.py --hotspots *.cc > hotspots.txt
-python $depth/buildscripts/coverage.py --summary *.cc > summary.txt
-python $depth/buildscripts/coverage.py --uncovered *.scm > uncovered-scheme.txt
-
-head -20 summary.txt
-
-cat <<EOF
-results in
-
- out/coverage-results/summary.txt
- out/coverage-results/uncovered.txt
- out/coverage-results/uncovered-scheme.txt
- out/coverage-results/hotspots.txt
-
-EOF
+++ /dev/null
-#!/bin/sh
-
-if test "$1" == "--fresh"; then
- fresh=yes
-fi
-
-if test ! -f config-prof.make; then
- fresh=yes
-fi
-
-if test "$fresh" = "yes";
-then
- ./configure --enable-config=prof --enable-optimising \
- && perl -i~ -pe 's/-pipe /-pg -pipe /g' config-prof.make \
- && perl -i~ -pe 's/ -ldl / -pg -ldl /g' config-prof.make
-fi
-
-make conf=prof -j2
-
-if test "$?" != "0"; then
- exit 2
-fi
-
-depth=../..
-resultdir=out/profile-results
-
-rm -rf $resultdir
-mkdir $resultdir
-cd $resultdir
-
-
-cat > long-score.ly << EOF
-\version "2.10.0"
-foo = \new Staff \new Voice \repeat unfold 50 \relative { c4 d8[ d16( e]~ e16[ e e) f] g8 }
-\score {
- \new ChoirStaff <<
- \foo \foo \foo \foo
- \foo \foo \foo \foo
-
- >>
- \midi {}
- \layout {}
-}
-EOF
-
-rm gmon.sum
-
-exe=$depth/out-prof/bin/lilypond
-
-## todo: figure out representative sample.
-files="wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 mozart-hrn-3 mozart-hrn-3 long-score"
-
-
-
-$exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
- -I $depth/input/mutopia/W.A.Mozart/ \
- $files
-
-
-for a in *.profile; do
- echo $a
- cat $a
-done
-
-echo 'running gprof'
-gprof $exe > profile
-
-exit 0
-
-
-## gprof -s takes forever.
-for a in seq 1 3; do
- for f in $files ; do
- $exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
- -I $depth/input/mutopia/W.A.Mozart/ \
- $f
-
- echo 'running gprof'
- if test -f gmon.sum ; then
- gprof -s $exe gmon.out gmon.sum
- else
- mv gmon.out gmon.sum
- fi
- done
-done
-
-gprof $exe gmon.sum > profile
+++ /dev/null
-# -*-python-*-
-
-import glob
-import os
-import string
-
-Import ('env')
-
-# utility
-
-def add_suffixes (target, source, env, target_suffixes, src_suffixes):
- base = os.path.splitext (str (target[0]))[0]
- return (target + map (lambda x: base + x, target_suffixes),
- source + map (lambda x: base + x, src_suffixes))
-
-# junkme; see _concat
-def join_path (path, infix=os.pathsep, prefix = ''):
- def dir (x):
- if x and x[0] == '#':
- return env['srcdir'] + x[1:]
- return x
- return string.join (map (lambda x: prefix + dir (x), path), infix)
-
-
-def src_glob (s):
- here = os.getcwd ()
- os.chdir (env.Dir ('.').srcnode ().abspath)
- result = glob.glob (s)
- os.chdir (here)
- return result
-
-Export ('src_glob')
-
-def base_glob (s):
- return map (lambda x: os.path.splitext (x)[0], src_glob (s))
-
-Export ('base_glob')
-
-def install (target, dir):
- dest = env['DESTDIR'] + dir
- if type (target) == type ([]):
- map (lambda x: env.Install (dir, x), target)
- else:
- env.Install (dir, target)
- env.Alias ('install', dir)
-
-Export ('install')
-
-def _fixme (s):
- x = string.replace (s, '#', env['srcdir'])
- x = string.replace (x, '@', env['absbuild'])
- return x
-
-# Clean separation between generic action + flags and actual
-# configuration and flags in environment for this build.
-
-# Generic builders could/should be part of SCons.
-
-
-HH = Builder (action = 'bison -d -o ${TARGET.base}.cc $SOURCE',
- suffix = '.hh', src_suffix = '.yy')
-env.Append (BUILDERS = {'HH' : HH})
-
-
-# Setup LilyPond environment. For the LilyPond build, we override
-# some of these commands in the ENVironment.
-
-lilypond_book_flags = '''--format=$LILYPOND_BOOK_FORMAT --process="lilypond -I$srcdir -I$srcdir/input/test $__verbose --backend=eps --formats=ps,png --header=texidoc -dinternal-type-checking -ddump-signatures -danti-alias-factor=2" '''
-
-env.Append (
- BSTINPUTS = '${SOURCE.dir}:${TARGET.dir}:',
- BIB2HTML = '$PYTHON $srcdir/buildscripts/bib2html.py',
- LILYOND_BOOK = 'lilypond-book',
- LILYPOND_BOOK_FORMAT = '',
- LILYPOND_BOOK_FLAGS = lilypond_book_flags,
- LILYPOND_PATH = [],
- # The SCons way around FOO_PATH:
- LILYPOND_INCFLAGS = '$( ${_concat(INCPREFIX, LILYPOND_PATH, INCSUFFIX, __env__)} $)',
-
- MAKEINFO_PATH = [],
- MAKEINFO_FLAGS = [],
- MAKEINFO_INCFLAGS = '$( ${_concat(INCPREFIX, MAKEINFO_PATH, INCSUFFIX, __env__, RDirs)} $)',
- #TEXI2DVI_FLAGS = [],
- _TEXI2DVI_FLAGS = '$( ${_concat(" ", TEXI2DVI_FLAGS,)} $)',
- )
-
-TXT =\
- Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCFLAGS\
- --no-split --no-headers $SOURCE',
- suffix = '.txt', src_suffix = '.texi')
-env.Append (BUILDERS = {'TXT': TXT})
-
-INFO =\
- Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCFLAGS $SOURCE',
- suffix = '.info', src_suffix = '.texi')
-env.Append (BUILDERS = {'INFO': INFO})
-
-HTML =\
- Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCLUDES\
- --html --no-split --no-headers $MAKEINFO_FLAGS $SOURCE',
-suffix = '.html', src_suffix = '.texi')
-env.Append (BUILDERS = {'HTML': HTML})
-
-TEXI =\
- Builder (action =
- '$LILYPOND_BOOK --output=${TARGET.dir} \
- --include=${TARGET.dir} $LILYPOND_INCFLAGS \
- --process="$LILYPOND $LILYPOND_INCFLAGS" \
- $LILYPOND_BOOK_FLAGS \
- $SOURCE',
- suffix = '.texi', src_suffix = '.tely')
-env.Append (BUILDERS = {'TEXI': TEXI})
-
-TEXIDVI =\
- Builder (action = 'cd ${TARGET.dir} && \
- texi2dvi --batch -I $srcdir/Documentation/user $_TEXI2DVI_FLAGS ${SOURCE.file}',
- suffix = '.dvi', src_suffix = '.texi')
-env.Append (BUILDERS = {'TEXIDVI': TEXIDVI})
-
-DVIPS =\
- Builder (action = 'TEXINPUTS=${TARGET.dir}:$$TEXINPUTS $DVIPS -o $TARGET $DVIPS_FLAGS $SOURCE',
- suffix = '.ps', src_suffix = '.dvi')
-env.Append (BUILDERS = {'DVIPS': DVIPS})
-
-DVIPDF =\
- Builder (action = 'TEXINPUTS=${TARGET.dir}:$$TEXINPUTS $DVIPS -o $TARGET -Ppdf $DVIPS_FLAGS $SOURCE',
- suffix = '.pdfps', src_suffix = '.dvi')
-env.Append (BUILDERS = {'DVIPDF': DVIPDF})
-
-PSPDF =\
- Builder (action = 'ps2pdf $PSPDF_FLAGS $SOURCE $TARGET',
- suffix = '.pdf', src_suffix = '.pdfps')
-env.Append (BUILDERS = {'PSPDF': PSPDF})
-
-PNG2EPS =\
- Builder (action = 'convert $SOURCE $TARGET',
- suffix = '.eps', src_suffix = '.png')
-env.Append (BUILDERS = {'PNG2EPS': PNG2EPS})
-
-EPS2PNG =\
- Builder (action = 'convert $SOURCE $TARGET',
- suffix = '.png', src_suffix = '.eps')
-env.Append (BUILDERS = {'EPS2PNG': EPS2PNG})
-
-def add_ps_target (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.ps'], source)
-
-lilypond =\
- Builder (action = '$LILYPOND --output=${TARGET.base} --include=${TARGET.dir} $SOURCE',
- suffix = '.pdf', src_suffix = '.ly')
-## emitter = add_ps_target)
-env.Append (BUILDERS = {'LilyPond': lilypond})
-
-ABC = Builder (action = '$ABC2LY --output=${TARGET} --strict $SOURCE',
- suffix = '.ly', src_suffix = '.abc')
-env.Append (BUILDERS = {'ABC': ABC})
-
-def add_log_target (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.log'], source)
-
-def add_tfm_target (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.tfm'], source)
-
-def add_lisp_enc_target (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.lisp', base + '.enc'],
- source)
-
-def add_cff_cffps_svg (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.cff', base + '.cff.ps', base + '.svg'],
- source)
-
-a = 'cd ${TARGET.dir} \
-&& MFINPUTS=.:${SOURCE.dir}:$srcdir/${SOURCE.dir}: \
-$MF "\\mode:=$MFMODE; nonstopmode; input ${SOURCE.filebase};" \
-| grep -v "@\|>>\|w:\|h:";'
-tfm = Builder (action = a, suffix = '.tfm', src_suffix = '.mf',
-# emitter = lambda t, s, e: add_suffixes (t, s, e, ['.log'], []))
- emitter = add_log_target)
-env.Append (BUILDERS = {'TFM': tfm})
-
-a = '$PYTHON $MF_TO_TABLE_PY \
---outdir=${TARGET.dir} \
---global-lisp=${TARGET.base}.otf-gtable \
---lisp=${TARGET.base}.lisp \
---enc=${TARGET.base}.enc \
-${TARGET.base}.log'
-gtable = Builder (action = a, suffix = '.otf-gtable', src_suffix = '.log',
- emitter = add_lisp_enc_target)
-env.Append (BUILDERS = {'GTABLE': gtable})
-
-def add_enc_src (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- #return (target, source + [base + '.enc'])
- return (target + [base + '.pfb', base + '.svg'], source + [base + '.enc'])
-
-def add_svg (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.svg'], source)
-
-# FIXME UGH, should fix --output option for mftrace
-a = 'cd ${TARGET.dir} && \
-if test -e ${SOURCE.filebase}.enc; then encoding="--encoding=${SOURCE.filebase}.enc"; fi; \
-MFINPUTS=$srcdir/mf:.: \
-$MFTRACE --formats=pfa,pfb,svg --simplify --keep-trying --no-afm \
-$$encoding $__verbose \
---include=${TARGET.dir} \
-${SOURCE.file}'
-
-pfa = Builder (action = a,
- suffix = '.pfa',
- src_suffix = '.mf',
- emitter = add_enc_src)
-env.Append (BUILDERS = {'PFA': pfa})
-
-a = ['(cd ${TARGET.dir} && $FONTFORGE -script ${SOURCE.file})',
-# '$PYTHON $srcdir/buildscripts/ps-embed-cff.py ${SOURCE.base}.cff $$(cat ${SOURCE.base}.fontname) ${SOURCE.base}.cff.ps',
- 'rm -f ${TARGET.dir}/*.scale.pfa']
-otf = Builder (action = a,
- suffix = '.otf',
- src_suffix = '.pe',
-# emitter = add_cff_cffps_svg
- emitter = add_svg
- )
-env.Append (BUILDERS = {'OTF': otf})
-
-
-# Specific builders
-
-env['DIFF_PY'] = '$srcdir/stepmake/bin/package-diff.py'
-a = '$PYTHON $DIFF_PY $NO__verbose --outdir=${TARGET.dir}'
-patch = Builder (action = a, suffix = '.diff', src_suffix = '.tar.gz')
-env.Append (BUILDERS = {'PATCH': patch})
-
-atvars = [
-'BASH',
-'DATE',
-'sharedstatedir',
-'GUILE',
-'bindir',
-'date',
-'datadir',
-'lilypond_datadir',
-'lilypond_libdir',
-'local_lilypond_datadir',
-'local_lilypond_libdir',
-'localedir',
-'PACKAGE',
-'package',
-'PATHSEP',
-'PERL',
-'prefix',
-'program_prefix',
-'program_suffix',
-'PYTHON',
-'SHELL',
-'TOPLEVEL_VERSION',
-'step-bindir',
-]
-
-def at_copy (target, source, env):
- n = str (source[0])
- s = open (n).read ()
- for i in atvars:
- if env.has_key (i):
- s = string.replace (s, '@%s@'% i, env[i])
- t = str (target[0])
- open (t, 'w').write (s)
- # wugh
- if os.path.basename (os.path.dirname (str (target[0]))) == 'bin':
- os.chmod (t, 0755)
-
-AT_COPY = Builder (action = at_copy, src_suffix = ['.in', '.py', '.sh',])
-env.Append (BUILDERS = {'AT_COPY': AT_COPY})
-
-MO = Builder (action = 'msgfmt -o $TARGET $SOURCE',
- suffix = '.mo', src_suffix = '.po')
-env.Append (BUILDERS = {'MO': MO})
-
-ugh = 'ln -f po/lilypond.pot ${TARGET.dir}/lilypond.po ; '
-a = ugh + 'xgettext --default-domain=lilypond --join \
---output-dir=${TARGET.dir} --add-comments \
---keyword=_ --keyword=_f --keyword=_i $SOURCES'
-PO = Builder (action = a, suffix = '.pot',
- src_suffix = ['.cc', '.hh', '.py'], multi = 1)
-env['potarget'] = os.path.join (env['absbuild'], 'po', env['out'],
- 'lilypond.pot')
-env['pocommand'] = a
-
-ugh = '; mv ${TARGET} ${SOURCE}'
-a = 'msgmerge ${SOURCE} ${SOURCE.dir}/lilypond.pot -o ${TARGET}' + ugh
-POMERGE = Builder (action = a, suffix = '.pom', src_suffix = '.po')
-env.Append (BUILDERS = {'POMERGE': POMERGE})
-
-a = 'BSTINPUTS=$BSTINPUTS $BIB2HTML -o $TARGET $SOURCE'
-BIB2HTML = Builder (action = a, suffix = '.html', src_suffix = '.bib')
-env.Append (BUILDERS = {'BIB2HTML': BIB2HTML})
-
-a = '$PYTHON $srcdir/buildscripts/lys-to-tely.py \
---name=${TARGET.base} --title="$TITLE" $SOURCES'
-LYS2TELY = Builder (action = a, suffix = '.tely', src_suffix = '.ly')
-env.Append (BUILDERS = {'LYS2TELY': LYS2TELY})
-
-
-def mutopia (ly=None, abc=None):
- e = env.Copy (
- LILYPOND_BOOK_FLAGS = lilypond_book_flags,
- )
-
- if not abc:
- abc = base_glob ('*.abc')
- if not ly:
- ly = base_glob ('*.ly') + map (e.ABC, abc)
- pdf = map (e.LilyPond, ly)
- env.Depends (pdf, ['#/lily', '#/mf'])
- env.Alias ('doc', pdf)
-
-Export ('mutopia')
-
-def collate (title = 'collated files'):
- ly = base_glob ('*.ly')
-
- e = env.Copy (
- TITLE = title,
- LILYPOND_BOOK_FLAGS = lilypond_book_flags,
- # __verbose = ' --verbose',
- )
- tely = e.LYS2TELY ('collated-files', ly)
- texi = e.TEXI (tely)
- env.Depends (texi, ['#/lily', '#/mf'])
- dvi = e.TEXIDVI (texi)
- pspdf = e.DVIPDF (dvi)
- pdf = e.PSPDF (pspdf)
- html = e.HTML (texi)
-
- env.Alias ('doc', pdf)
- env.Alias ('doc', html)
-
-Export ('collate')
-
-Export ('env')
+++ /dev/null
-#!@PYTHON@
-
-import subprocess
-import re
-import sys
-
-verbose = False
-
-def read_pipe (command):
- child = subprocess.Popen (command,
- stdout = subprocess.PIPE,
- stderr = subprocess.PIPE,
- shell = True)
- (output, error) = child.communicate ()
- code = str (child.wait ())
- if not child.stdout or child.stdout.close ():
- print "pipe failed: %(command)s" % locals ()
- if code != '0':
- error = code + ' ' + error
- return (output, error)
-
-revision_re = re.compile ('GIT [Cc]ommittish:\s+([a-f0-9]+)')
-vc_diff_cmd = 'git diff %(color_flag)s %(revision)s HEAD -- %(original)s | cat'
-
-def check_translated_doc (original, translated_file, translated_contents, color=False):
- m = revision_re.search (translated_contents)
- if not m:
- sys.stderr.write ('error: ' + translated_file + \
- ": no 'GIT committish: <hash>' found.\nPlease check " + \
- 'the whole file against the original in English, then ' + \
- 'fill in HEAD committish in the header.\n')
- sys.exit (1)
- revision = m.group (1)
-
- if color:
- color_flag = '--color'
- else:
- color_flag = '--no-color'
- c = vc_diff_cmd % vars ()
- if verbose:
- sys.stderr.write ('running: ' + c)
- return read_pipe (c)
+++ /dev/null
-#!@PYTHON@
-
-import sys
-import midi
-
-(h,tracks) = midi.parse (open (sys.argv[1]).read ())
-
-tracks = tracks[1:]
-
-for t in tracks:
- for e in t:
- print e
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-check_texi_refs.py
-Interactive Texinfo cross-references checking and fixing tool
-
-"""
-
-
-import sys
-import re
-import os
-import optparse
-import imp
-
-outdir = 'out-www'
-
-log = sys.stderr
-stdout = sys.stdout
-
-file_not_found = 'file not found in include path'
-
-warn_not_fixed = '*** Warning: this broken x-ref has not been fixed!\n'
-
-opt_parser = optparse.OptionParser (usage='check_texi_refs.py [OPTION]... FILE',
- description='''Check and fix \
-cross-references in a collection of Texinfo
-documents heavily cross-referenced each other.
-''')
-
-opt_parser.add_option ('-a', '--auto-fix',
- help="Automatically fix cross-references whenever \
-it is possible",
- action='store_true',
- dest='auto_fix',
- default=False)
-
-opt_parser.add_option ('-b', '--batch',
- help="Do not run interactively",
- action='store_false',
- dest='interactive',
- default=True)
-
-opt_parser.add_option ('-c', '--check-comments',
- help="Also check commented out x-refs",
- action='store_true',
- dest='check_comments',
- default=False)
-
-opt_parser.add_option ('-p', '--check-punctuation',
- help="Check punctuation after x-refs",
- action='store_true',
- dest='check_punctuation',
- default=False)
-
-opt_parser.add_option ("-I", '--include', help="add DIR to include path",
- metavar="DIR",
- action='append', dest='include_path',
- default=[os.path.abspath (os.getcwd ())])
-
-(options, files) = opt_parser.parse_args ()
-
-class InteractionError (Exception):
- pass
-
-
-manuals_defs = imp.load_source ('manuals_defs', files[0])
-manuals = {}
-
-def find_file (name, prior_directory='.'):
- p = os.path.join (prior_directory, name)
- out_p = os.path.join (prior_directory, outdir, name)
- if os.path.isfile (p):
- return p
- elif os.path.isfile (out_p):
- return out_p
-
- # looking for file in include_path
- for d in options.include_path:
- p = os.path.join (d, name)
- if os.path.isfile (p):
- return p
-
- # file not found in include_path: looking in `outdir' subdirs
- for d in options.include_path:
- p = os.path.join (d, outdir, name)
- if os.path.isfile (p):
- return p
-
- raise EnvironmentError (1, file_not_found, name)
-
-
-exit_code = 0
-
-def set_exit_code (n):
- global exit_code
- exit_code = max (exit_code, n)
-
-
-if options.interactive:
- try:
- import readline
- except:
- pass
-
- def yes_prompt (question, default=False, retries=3):
- d = {True: 'y', False: 'n'}.get (default, False)
- while retries:
- a = raw_input ('%s [default: %s]' % (question, d) + '\n')
- if a.lower ().startswith ('y'):
- return True
- if a.lower ().startswith ('n'):
- return False
- if a == '' or retries < 0:
- return default
- stdout.write ("Please answer yes or no.\n")
- retries -= 1
-
- def search_prompt ():
- """Prompt user for a substring to look for in node names.
-
-If user input is empty or matches no node name, return None,
-otherwise return a list of (manual, node name, file) tuples.
-
-"""
- substring = raw_input ("Enter a substring to search in node names \
-(press Enter to skip this x-ref):\n")
- if not substring:
- return None
- substring = substring.lower ()
- matches = []
- for k in manuals:
- matches += [(k, node, manuals[k]['nodes'][node][0])
- for node in manuals[k]['nodes']
- if substring in node.lower ()]
- return matches
-
-else:
- def yes_prompt (question, default=False, retries=3):
- return default
-
- def search_prompt ():
- return None
-
-
-ref_re = re.compile \
- ('@(ref|ruser|rlearning|rprogram|rglos)(?:\\{(?P<ref>[^,\\\\\\}]+?)|\
-named\\{(?P<refname>[^,\\\\]+?),(?P<display>[^,\\\\\\}]+?))\\}(?P<last>.)',
- re.DOTALL)
-node_include_re = re.compile (r'(?m)^@(node|include)\s+(.+?)$')
-
-whitespace_re = re.compile (r'\s+')
-line_start_re = re.compile ('(?m)^')
-
-def which_line (index, newline_indices):
- """Calculate line number of a given string index
-
-Return line number of string index index, where
-newline_indices is an ordered iterable of all newline indices.
-"""
- inf = 0
- sup = len (newline_indices) - 1
- n = len (newline_indices)
- while inf + 1 != sup:
- m = (inf + sup) / 2
- if index >= newline_indices [m]:
- inf = m
- else:
- sup = m
- return inf + 1
-
-
-comments_re = re.compile ('(?<!@)(@c(?:omment)? \
-.*?\\n|^@ignore\\n.*?\\n@end ignore\\n)', re.M | re.S)
-
-def calc_comments_boundaries (texinfo_doc):
- return [(m.start (), m.end ()) for m in comments_re.finditer (texinfo_doc)]
-
-
-def is_commented_out (start, end, comments_boundaries):
- for k in range (len (comments_boundaries)):
- if (start > comments_boundaries[k][0]
- and end <= comments_boundaries[k][1]):
- return True
- elif end <= comments_boundaries[k][0]:
- return False
- return False
-
-
-def read_file (f, d):
- s = open (f).read ()
- base = os.path.basename (f)
- dir = os.path.dirname (f)
-
- d['contents'][f] = s
-
- d['newline_indices'][f] = [m.end () for m in line_start_re.finditer (s)]
- if options.check_comments:
- d['comments_boundaries'][f] = []
- else:
- d['comments_boundaries'][f] = calc_comments_boundaries (s)
-
- for m in node_include_re.finditer (s):
- if m.group (1) == 'node':
- line = which_line (m.start (), d['newline_indices'][f])
- d['nodes'][m.group (2)] = (f, line)
-
- elif m.group (1) == 'include':
- try:
- p = find_file (m.group (2), dir)
- except EnvironmentError, (errno, strerror):
- if strerror == file_not_found:
- continue
- else:
- raise
- read_file (p, d)
-
-
-def read_manual (name):
- """Look for all node names and cross-references in a Texinfo document
-
-Return a (manual, dictionary) tuple where manual is the cross-reference
-macro name defined by references_dict[name], and dictionary
-has the following keys:
-
- 'nodes' is a dictionary of `node name':(file name, line number),
-
- 'contents' is a dictionary of file:`full file contents',
-
- 'newline_indices' is a dictionary of
-file:[list of beginning-of-line string indices],
-
- 'comments_boundaries' is a list of (start, end) tuples,
-which contain string indices of start and end of each comment.
-
-Included files that can be found in the include path are processed too.
-
-"""
- d = {}
- d['nodes'] = {}
- d['contents'] = {}
- d['newline_indices'] = {}
- d['comments_boundaries'] = {}
- manual = manuals_defs.references_dict.get (name, '')
- try:
- f = find_file (name + '.tely')
- except EnvironmentError, (errno, strerror):
- if not strerror == file_not_found:
- raise
- else:
- try:
- f = find_file (name + '.texi')
- except EnvironmentError, (errno, strerror):
- if strerror == file_not_found:
- sys.stderr.write (name + '.{texi,tely}: ' +
- file_not_found + '\n')
- return (manual, d)
- else:
- raise
-
- log.write ("Processing manual %s (%s)\n" % (f, manual))
- read_file (f, d)
- return (manual, d)
-
-
-log.write ("Reading files...\n")
-
-manuals = dict ([read_manual (name)
- for name in manuals_defs.references_dict.keys ()])
-
-ref_fixes = set ()
-bad_refs_count = 0
-fixes_count = 0
-
-def add_fix (old_type, old_ref, new_type, new_ref):
- ref_fixes.add ((old_type, old_ref, new_type, new_ref))
-
-
-def lookup_fix (r):
- found = []
- for (old_type, old_ref, new_type, new_ref) in ref_fixes:
- if r == old_ref:
- found.append ((new_type, new_ref))
- return found
-
-
-def preserve_linebreak (text, linebroken):
- if linebroken:
- if ' ' in text:
- text = text.replace (' ', '\n', 1)
- n = ''
- else:
- n = '\n'
- else:
- n = ''
- return (text, n)
-
-
-def choose_in_numbered_list (message, string_list, sep=' ', retries=3):
- S = set (string_list)
- S.discard ('')
- string_list = list (S)
- numbered_list = sep.join ([str (j + 1) + '. ' + string_list[j]
- for j in range (len (string_list))]) + '\n'
- t = retries
- while t > 0:
- value = ''
- stdout.write (message +
- "(press Enter to discard and start a new search)\n")
- input = raw_input (numbered_list)
- if not input:
- return ''
- try:
- value = string_list[int (input) - 1]
- except IndexError:
- stdout.write ("Error: index number out of range\n")
- except ValueError:
- matches = [input in v for v in string_list]
- n = matches.count (True)
- if n == 0:
- stdout.write ("Error: input matches no item in the list\n")
- elif n > 1:
- stdout.write ("Error: ambiguous input (matches several items \
-in the list)\n")
- else:
- value = string_list[matches.index (True)]
- if value:
- return value
- t -= 1
- raise InteractionError ("%d retries limit exceeded" % retries)
-
-refs_count = 0
-
-def check_ref (manual, file, m):
- global fixes_count, bad_refs_count, refs_count
- refs_count += 1
- bad_ref = False
- fixed = True
- type = m.group (1)
- original_name = m.group ('ref') or m.group ('refname')
- name = whitespace_re.sub (' ', original_name). strip ()
- newline_indices = manuals[manual]['newline_indices'][file]
- line = which_line (m.start (), newline_indices)
- linebroken = '\n' in original_name
- original_display_name = m.group ('display')
- next_char = m.group ('last')
- if original_display_name: # the xref has an explicit display name
- display_linebroken = '\n' in original_display_name
- display_name = whitespace_re.sub (' ', original_display_name). strip ()
- commented_out = is_commented_out \
- (m.start (), m.end (), manuals[manual]['comments_boundaries'][file])
- useful_fix = not outdir in file
-
- # check puncuation after x-ref
- if options.check_punctuation and not next_char in '.,;:!?':
- stdout.write ("Warning: %s: %d: `%s': x-ref \
-not followed by punctuation\n" % (file, line, name))
-
- # validate xref
- explicit_type = type
- new_name = name
-
- if type != 'ref' and type == manual and not commented_out:
- if useful_fix:
- fixed = False
- bad_ref = True
- stdout.write ("\n%s: %d: `%s': external %s x-ref should be internal\n"
- % (file, line, name, type))
- if options.auto_fix or yes_prompt ("Fix this?"):
- type = 'ref'
-
- if type == 'ref':
- explicit_type = manual
-
- if not name in manuals[explicit_type]['nodes'] and not commented_out:
- bad_ref = True
- fixed = False
- stdout.write ('\n')
- if type == 'ref':
- stdout.write ("\e[1;31m%s: %d: `%s': wrong internal x-ref\e[0m\n"
- % (file, line, name))
- else:
- stdout.write ("\e[1;31m%s: %d: `%s': wrong external `%s' x-ref\e[0m\n"
- % (file, line, name, type))
- # print context
- stdout.write ('--\n' + manuals[manual]['contents'][file]
- [newline_indices[max (0, line - 2)]:
- newline_indices[min (line + 3,
- len (newline_indices) - 1)]] +
- '--\n')
-
- # try to find the reference in other manuals
- found = []
- for k in [k for k in manuals if k != explicit_type]:
- if name in manuals[k]['nodes']:
- if k == manual:
- found = ['ref']
- stdout.write ("\e[1;32m found as internal x-ref\e[0m\n")
- break
- else:
- found.append (k)
- stdout.write ("\e[1;32m found as `%s' x-ref\e[0m\n" % k)
-
- if (len (found) == 1
- and (options.auto_fix or yes_prompt ("Fix this x-ref?"))):
- add_fix (type, name, found[0], name)
- type = found[0]
- fixed = True
-
- elif len (found) > 1 and useful_fix:
- if options.interactive or options.auto_fix:
- stdout.write ("* Several manuals contain this node name, \
-cannot determine manual automatically.\n")
- if options.interactive:
- t = choose_in_numbered_list ("Choose manual for this x-ref by \
-index number or beginning of name:\n", found)
- if t:
- add_fix (type, name, t, name)
- type = t
- fixed = True
-
- if not fixed:
- # try to find a fix already made
- found = lookup_fix (name)
-
- if len (found) == 1:
- stdout.write ("Found one previous fix: %s `%s'\n" % found[0])
- if options.auto_fix or yes_prompt ("Apply this fix?"):
- type, new_name = found[0]
- fixed = True
-
- elif len (found) > 1:
- if options.interactive or options.auto_fix:
- stdout.write ("* Several previous fixes match \
-this node name, cannot fix automatically.\n")
- if options.interactive:
- concatened = choose_in_numbered_list ("Choose new manual \
-and x-ref by index number or beginning of name:\n", [''.join ([i[0], ' ', i[1]])
- for i in found],
- sep='\n')
- if concatened:
- type, new_name = concatenated.split (' ', 1)
- fixed = True
-
- if not fixed:
- # all previous automatic fixing attempts failed,
- # ask user for substring to look in node names
- while True:
- node_list = search_prompt ()
- if node_list == None:
- if options.interactive:
- stdout.write (warn_not_fixed)
- break
- elif not node_list:
- stdout.write ("No matched node names.\n")
- else:
- concatenated = choose_in_numbered_list ("Choose \
-node name and manual for this x-ref by index number or beginning of name:\n", \
- [' '.join ([i[0], i[1], '(in %s)' % i[2]])
- for i in node_list],
- sep='\n')
- if concatenated:
- t, z = concatenated.split (' ', 1)
- new_name = z.split (' (in ', 1)[0]
- add_fix (type, name, t, new_name)
- type = t
- fixed = True
- break
-
- if fixed and type == manual:
- type = 'ref'
- bad_refs_count += int (bad_ref)
- if bad_ref and not useful_fix:
- stdout.write ("*** Warning: this file is automatically generated, \
-please fix the code source instead of generated documentation.\n")
-
- # compute returned string
- if new_name == name:
- if bad_ref and (options.interactive or options.auto_fix):
- # only the type of the ref was fixed
- fixes_count += int (fixed)
- if original_display_name:
- return ('@%snamed{%s,%s}' % (type, original_name, original_display_name)) + next_char
- else:
- return ('@%s{%s}' % (type, original_name)) + next_char
- else:
- fixes_count += int (fixed)
- (ref, n) = preserve_linebreak (new_name, linebroken)
- if original_display_name:
- if bad_ref:
- stdout.write ("Current display name is `%s'\n")
- display_name = raw_input \
- ("Enter a new display name or press enter to keep the existing name:\n") \
- or display_name
- (display_name, n) = preserve_linebreak (display_name, display_linebroken)
- else:
- display_name = original_display_name
- return ('@%snamed{%s,%s}' % (type, ref, display_name)) + \
- next_char + n
- else:
- return ('@%s{%s}' % (type, ref)) + next_char + n
-
-
-log.write ("Checking cross-references...\n")
-
-try:
- for key in manuals:
- for file in manuals[key]['contents']:
- s = ref_re.sub (lambda m: check_ref (key, file, m),
- manuals[key]['contents'][file])
- if s != manuals[key]['contents'][file]:
- open (file, 'w').write (s)
-except KeyboardInterrupt:
- log.write ("Operation interrupted, exiting.\n")
- sys.exit (2)
-except InteractionError, instance:
- log.write ("Operation refused by user: %s\nExiting.\n" % instance)
- sys.exit (3)
-
-log.write ("\e[1;36mDone: %d x-refs found, %d bad x-refs found, fixed %d.\e[0m\n" %
- (refs_count, bad_refs_count, fixes_count))
+++ /dev/null
-#!/usr/bin/env python
-
-import __main__
-import optparse
-import os
-import sys
-
-import langdefs
-import buildlib
-
-verbose = 0
-use_colors = False
-lang = 'C'
-C = lang
-
-def dir_lang (file, lang, lang_dir_index):
- path_components = file.split ('/')
- path_components[lang_dir_index] = lang
- return os.path.join (*path_components)
-
-def do_file (file_name, lang_codes, buildlib):
- if verbose:
- sys.stderr.write ('%s...\n' % file_name)
- split_file_name = file_name.split ('/')
- d1, d2 = split_file_name[0:2]
- if d1 in lang_codes:
- check_lang = d1
- lang_dir_index = 0
- elif d2 in lang_codes:
- check_lang = d2
- lang_dir_index = 1
- else:
- check_lang = lang
- if check_lang == C:
- raise Exception ('cannot determine language for ' + file_name)
-
- original = dir_lang (file_name, '', lang_dir_index)
- translated_contents = open (file_name).read ()
- (diff_string, error) \
- = buildlib.check_translated_doc (original,
- file_name,
- translated_contents,
- color=use_colors and not update_mode)
-
- if error:
- sys.stderr.write ('warning: %s: %s' % (file_name, error))
-
- if update_mode:
- if error or len (diff_string) >= os.path.getsize (original):
- buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + original)
- elif diff_string:
- diff_file = original + '.diff'
- f = open (diff_file, 'w')
- f.write (diff_string)
- f.close ()
- buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + diff_file)
- os.remove (diff_file)
- else:
- sys.stdout.write (diff_string)
-
-def usage ():
- sys.stdout.write (r'''
-Usage:
-check-translation [--language=LANG] [--verbose] [--update] FILE...
-
-This script is licensed under the GNU GPL.
-''')
-
-def do_options ():
- global lang, verbose, update_mode, use_colors
-
- p = optparse.OptionParser (usage="check-translation [--language=LANG] [--verbose] FILE...",
- description="This script is licensed under the GNU GPL.")
- p.add_option ("--language",
- action='store',
- default='site',
- dest="language")
- p.add_option ("--no-color",
- action='store_false',
- default=True,
- dest="color",
- help="do not print ANSI-cooured output")
- p.add_option ("--verbose",
- action='store_true',
- default=False,
- dest="verbose",
- help="print details, including executed shell commands")
- p.add_option ('-u', "--update",
- action='store_true',
- default=False,
- dest='update_mode',
- help='call $EDITOR to update the translation')
-
- (options, files) = p.parse_args ()
- verbose = options.verbose
- lang = options.language
- use_colors = options.color
- update_mode = options.update_mode
-
- return files
-
-def main ():
- global update_mode, text_editor
-
- files = do_options ()
- if 'EDITOR' in os.environ:
- text_editor = os.environ['EDITOR']
- else:
- update_mode = False
-
- buildlib.verbose = verbose
-
- for i in files:
- do_file (i, langdefs.LANGDICT.keys (), buildlib)
-
-if __name__ == '__main__':
- main ()
+++ /dev/null
-#!/usr/bin/python
-
-import os
-import glob
-import re
-import sys
-import optparse
-
-#File 'accidental-engraver.cc'
-#Lines executed:87.70% of 252
-
-def summary (args):
- results = []
- for f in args:
- str = open (f).read ()
- m = re.search ("File '([^']+.cc)'\s*Lines executed:([0-9.]+)% of ([0-9]+)", str)
-
- if m and '/usr/lib' in m.group (1):
- continue
-
- if m:
- cov = float (m.group (2))
- lines = int (m.group (3))
- pain = lines * (100.0 - cov)
- file = m.group (1)
- tup = (pain, locals ().copy())
-
- results.append(tup)
-
- results.sort ()
- results.reverse()
-
- print 'files sorted by number of untested lines (decreasing)'
- print
- print '%5s (%6s): %s' % ('cov %', 'lines', 'file')
- print '----------------------------------------------'
-
- for (pain, d) in results:
- print '%(cov)5.2f (%(lines)6d): %(file)s' % d
-
-class Chunk:
- def __init__ (self, range, coverage_count, all_lines, file):
- assert coverage_count >= 0
- assert type (range) == type (())
-
- self.coverage_count = coverage_count
- self.range = range
- self.all_lines = all_lines
- self.file = file
-
- def length (self):
- return self.range[1] - self.range[0]
-
- def text (self):
- return ''.join ([l[2] for l in self.lines()])
-
- def lines (self):
- return self.all_lines[self.range[0]:
- self.range[1]]
- def widen (self):
- self.range = (min (self.range[0] -1, 0),
- self.range[0] +1)
- def write (self):
- print 'chunk in', self.file
- for (c, n, l) in self.lines ():
- cov = '%d' % c
- if c == 0:
- cov = '#######'
- elif c < 0:
- cov = ''
- sys.stdout.write ('%8s:%8d:%s' % (cov, n, l))
-
- def uncovered_score (self):
- return self.length ()
-
-class SchemeChunk (Chunk):
- def uncovered_score (self):
- text = self.text ()
- if (text.startswith ('(define ')
- and not text.startswith ('(define (')):
- return 0
-
- if text.startswith ('(use-modules '):
- return 0
-
- if (text.startswith ('(define-public ')
- and not text.startswith ('(define-public (')):
- return 0
-
- return len ([l for (c,n,l) in self.lines() if (c == 0)])
-
-def read_gcov (f):
- ls = []
-
- in_lines = [l for l in open (f).readlines ()]
- (count_len, line_num_len) = tuple (map (len, in_lines[0].split (':')[:2]))
-
- for l in in_lines:
- c = l[:count_len].strip ()
- l = l[count_len+1:]
- n = int (l[:line_num_len].strip ())
-
- if n == 0:
- continue
-
- if '#' in c:
- c = 0
- elif c == '-':
- c = -1
- else:
- c = int (c)
-
- l = l[line_num_len+1:]
-
- ls.append ((c,n,l))
-
- return ls
-
-def get_c_chunks (ls, file):
- chunks = []
- chunk = []
-
- last_c = -1
- for (c, n, l) in ls:
- if not (c == last_c or c < 0 and l != '}\n'):
- if chunk and last_c >= 0:
- nums = [n-1 for (n, l) in chunk]
- chunks.append (Chunk ((min (nums), max (nums)+1),
- last_c, ls, file))
- chunk = []
-
- chunk.append ((n,l))
- if c >= 0:
- last_c = c
-
- return chunks
-
-def get_scm_chunks (ls, file):
- chunks = []
- chunk = []
-
- def new_chunk ():
- if chunk:
- nums = [n-1 for (n, l) in chunk]
- chunks.append (SchemeChunk ((min (nums), max (nums)+1),
- max (last_c, 0), ls, file))
- chunk[:] = []
-
- last_c = -1
- for (cov_count, line_number, line) in ls:
- if line.startswith ('('):
- new_chunk ()
- last_c = -1
-
- chunk.append ((line_number, line))
- if cov_count >= 0:
- last_c = cov_count
-
- return chunks
-
-def widen_chunk (ch, ls):
- a -= 1
- b += 1
-
- return [(n, l) for (c, n, l) in ls[a:b]]
-
-
-def extract_chunks (file):
- try:
- ls = read_gcov (file)
- except IOError, s :
- print s
- return []
-
- cs = []
- if 'scm' in file:
- cs = get_scm_chunks (ls, file)
- else:
- cs = get_c_chunks (ls, file)
- return cs
-
-
-def filter_uncovered (chunks):
- def interesting (c):
- if c.coverage_count > 0:
- return False
-
- t = c.text()
- for stat in ('warning', 'error', 'print', 'scm_gc_mark'):
- if stat in t:
- return False
- return True
-
- return [c for c in chunks if interesting (c)]
-
-
-def main ():
- p = optparse.OptionParser (usage="usage coverage.py [options] files",
- description="")
- p.add_option ("--summary",
- action='store_true',
- default=False,
- dest="summary")
-
- p.add_option ("--hotspots",
- default=False,
- action='store_true',
- dest="hotspots")
-
- p.add_option ("--uncovered",
- default=False,
- action='store_true',
- dest="uncovered")
-
-
- (options, args) = p.parse_args ()
-
-
- if options.summary:
- summary (['%s.gcov-summary' % s for s in args])
-
- if options.uncovered or options.hotspots:
- chunks = []
- for a in args:
- name = a
- if name.endswith ('scm'):
- name += '.cov'
- else:
- name += '.gcov'
-
- chunks += extract_chunks (name)
-
- if options.uncovered:
- chunks = filter_uncovered (chunks)
- chunks = [(c.uncovered_score (), c) for c in chunks if c.uncovered_score() > 0]
- elif options.hotspots:
- chunks = [((c.coverage_count, -c.length()), c) for c in chunks]
-
-
- chunks.sort ()
- chunks.reverse ()
- for (score, c) in chunks:
- c.write ()
-
-
-
-if __name__ == '__main__':
- main ()
+++ /dev/null
-#!@PYTHON@
-# -*- coding: utf-8 -*-
-# extract_texi_filenames.py
-
-# USAGE: extract_texi_filenames.py [-o OUTDIR] FILES
-#
-# -o OUTDIR specifies that output files should rather be written in OUTDIR
-#
-# Description:
-# This script parses the .texi file given and creates a file with the
-# nodename <=> filename/anchor map.
-# The idea behind: Unnumbered subsections go into the same file as the
-# previous numbered section, @translationof gives the original node name,
-# which is then used for the filename/anchor.
-#
-# If this script is run on a file texifile.texi, it produces a file
-# texifile[.LANG].xref-map with tab-separated entries of the form
-# NODE\tFILENAME\tANCHOR
-# LANG is the document language in case it's not 'en'
-# Note: The filename does not have any extension appended!
-# This file can then be used by our texi2html init script to determine
-# the correct file name and anchor for external refs
-
-import sys
-import re
-import os
-import getopt
-
-optlist, args = getopt.getopt (sys.argv[1:],'o:')
-files = args
-
-outdir = '.'
-for x in optlist:
- if x[0] == '-o':
- outdir = x[1]
-
-if not os.path.isdir (outdir):
- if os.path.exists (outdir):
- os.unlink (outdir)
- os.makedirs (outdir)
-
-include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M)
-whitespaces = re.compile (r'\s+')
-section_translation_re = re.compile ('^@(node|(?:unnumbered|appendix)\
-(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|\
-(?:major|chap|(?:sub){0,2})heading|translationof) (.*?)\\s*$', re.MULTILINE)
-
-def expand_includes (m, filename):
- filepath = os.path.join (os.path.dirname (filename), m.group(1)) + '.texi'
- if os.path.exists (filepath):
- return extract_sections (filepath)[1]
- else:
- print "Unable to locate include file " + filepath
- return ''
-
-lang_re = re.compile (r'^@documentlanguage (.+)', re.M)
-
-def extract_sections (filename):
- result = ''
- f = open (filename, 'r')
- page = f.read ()
- f.close()
- # Search document language
- m = lang_re.search (page)
- if m and m.group (1) != 'en':
- lang_suffix = '.' + m.group (1)
- else:
- lang_suffix = ''
- # Replace all includes by their list of sections and extract all sections
- page = include_re.sub (lambda m: expand_includes (m, filename), page)
- sections = section_translation_re.findall (page)
- for sec in sections:
- result += "@" + sec[0] + " " + sec[1] + "\n"
- return (lang_suffix, result)
-
-# Convert a given node name to its proper file name (normalization as explained
-# in the texinfo manual:
-# http://www.gnu.org/software/texinfo/manual/texinfo/html_node/HTML-Xref-Node-Name-Expansion.html
-def texinfo_file_name(title):
- # exception: The top node is always mapped to index.html
- if title == "Top":
- return "index"
- # File name normalization by texinfo (described in the texinfo manual):
- # 1/2: letters and numbers are left unchanged
- # 3/4: multiple, leading and trailing whitespace is removed
- title = title.strip ();
- title = whitespaces.sub (' ', title)
- # 5: all remaining spaces are converted to '-'
- # 6: all other 7- or 8-bit chars are replaced by _xxxx (xxxx=ascii character code)
- result = ''
- for index in range(len(title)):
- char = title[index]
- if char == ' ': # space -> '-'
- result += '-'
- elif ( ('0' <= char and char <= '9' ) or
- ('A' <= char and char <= 'Z' ) or
- ('a' <= char and char <= 'z' ) ): # number or letter
- result += char
- else:
- ccode = ord(char)
- if ccode <= 0xFFFF:
- result += "_%04x" % ccode
- else:
- result += "__%06x" % ccode
- # 7: if name begins with number, prepend 't_g' (so it starts with a letter)
- if (result != '') and (ord(result[0]) in range (ord('0'), ord('9'))):
- result = 't_g' + result
- return result
-
-texinfo_re = re.compile (r'@.*{(.*)}')
-def remove_texinfo (title):
- return texinfo_re.sub (r'\1', title)
-
-def create_texinfo_anchor (title):
- return texinfo_file_name (remove_texinfo (title))
-
-unnumbered_re = re.compile (r'unnumbered.*')
-def process_sections (filename, lang_suffix, page):
- sections = section_translation_re.findall (page)
- basename = os.path.splitext (os.path.basename (filename))[0]
- p = os.path.join (outdir, basename) + lang_suffix + '.xref-map'
- f = open (p, 'w')
-
- this_title = ''
- this_filename = 'index'
- this_anchor = ''
- this_unnumbered = False
- had_section = False
- for sec in sections:
- if sec[0] == "node":
- # Write out the cached values to the file and start a new section:
- if this_title != '' and this_title != 'Top':
- f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
- had_section = False
- this_title = remove_texinfo (sec[1])
- this_anchor = create_texinfo_anchor (sec[1])
- elif sec[0] == "translationof":
- anchor = create_texinfo_anchor (sec[1])
- # If @translationof is used, it gives the original node name, which
- # we use for the anchor and the file name (if it is a numbered node)
- this_anchor = anchor
- if not this_unnumbered:
- this_filename = anchor
- else:
- # Some pages might not use a node for every section, so treat this
- # case here, too: If we already had a section and encounter enother
- # one before the next @node, we write out the old one and start
- # with the new values
- if had_section and this_title != '':
- f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
- this_title = remove_texinfo (sec[1])
- this_anchor = create_texinfo_anchor (sec[1])
- had_section = True
-
- # unnumbered nodes use the previously used file name, only numbered
- # nodes get their own filename! However, top-level @unnumbered
- # still get their own file.
- this_unnumbered = unnumbered_re.match (sec[0])
- if not this_unnumbered or sec[0] == "unnumbered":
- this_filename = this_anchor
-
- if this_title != '' and this_title != 'Top':
- f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
- f.close ()
-
-
-for filename in files:
- print "extract_texi_filenames.py: Processing %s" % filename
- (lang_suffix, sections) = extract_sections (filename)
- process_sections (filename, lang_suffix, sections)
+++ /dev/null
-#!/usr/bin/python
-import sys
-import re
-import os
-
-
-full_paths = {}
-incs = {}
-inc_re = re.compile ('^#include "([^"]+)"')
-def parse_file (fn):
- lst = []
-
- lc = 0
- for l in open (fn).readlines():
- lc += 1
- m = inc_re.search (l)
- if m:
- lst.append ((lc, m.group (1)))
-
- base = os.path.split (fn)[1]
- full_paths[base] = fn
- incs[base] = lst
-
-
-def has_include (f, name):
- try:
- return name in [b for (a,b) in incs[f]]
- except KeyError:
- return False
-
-for a in sys.argv:
- parse_file (a)
-
-print '-*-compilation-*-'
-for (f, lst) in incs.items ():
- for (n, inc) in lst:
- for (n2, inc2) in lst:
- if has_include (inc2, inc):
- print "%s:%d: already have %s from %s" % (full_paths[f], n,
- inc, inc2)
- break
-
-
-
+++ /dev/null
-#!/usr/bin/python
-
-# fixcc -- nitpick lily's c++ code
-
-# TODO
-# * maintainable rules: regexp's using whitespace (?x) and match names
-# <identifier>)
-# * trailing `*' vs. function definition
-# * do not break/change indentation of fixcc-clean files
-# * check lexer, parser
-# * rewrite in elisp, add to cc-mode
-# * using regexes is broken by design
-# * ?
-# * profit
-
-import __main__
-import getopt
-import os
-import re
-import string
-import sys
-import time
-
-COMMENT = 'COMMENT'
-STRING = 'STRING'
-GLOBAL_CXX = 'GC++'
-CXX = 'C++'
-verbose_p = 0
-indent_p = 0
-
-rules = {
- GLOBAL_CXX:
- [
- # delete gratuitous block
- ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
- '\n\\2;'),
- ],
- CXX:
- [
- # space before parenthesis open
- ('([^\( \]])[ \t]*\(', '\\1 ('),
- # space after comma
- ("\([^'],\)[ \t]*", '\1 '),
- # delete gratuitous block
- ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
- '\n\\2;'),
- # delete inline tabs
- ('(\w)\t+', '\\1 '),
- # delete inline double spaces
- (' *', ' '),
- # delete space after parenthesis open
- ('\([ \t]*', '('),
- # delete space before parenthesis close
- ('[ \t]*\)', ')'),
- # delete spaces after prefix
- ('(--|\+\+)[ \t]*([\w\)])', '\\1\\2'),
- # delete spaces before postfix
- ('([\w\)\]])[ \t]*(--|\+\+)', '\\1\\2'),
- # delete space after parenthesis close
- #('\)[ \t]*([^\w])', ')\\1'),
- # delete space around operator
- # ('([\w\(\)\]])([ \t]*)(::|\.)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
- ('([\w\(\)\]])([ \t]*)(\.|->)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
- # delete space after operator
- ('(::)([ \t]*)([\w\(\)])', '\\1\\3'),
- # delete superflous space around operator
- ('([\w\(\)\]])([ \t]+)(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&|\||\*)([ \t]+)([\w\(\)])', '\\1 \\3 \\5'),
- # space around operator1
- ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
- # space around operator2
- ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) ([^\w\s])', '\\1 \\2 \\3'),
- # space around operator3
- ('([^\w\s]) (&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|[^-]>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
- # space around operator4
- ('([\w\(\)\]]) (\*|/|\+|-) *([-:])', '\\1 \\2 \\3'),
- # space around +/-; exponent
- ('([\w\)\]])(\+|-)([_A-Za-z\(])', '\\1 \\2 \\3'),
- ('([_\dA-Za-df-z\)\]])(\+|-)([\w\(])', '\\1 \\2 \\3'),
- # trailing operator
- (' (::|&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&XXX|\||\*XXX)[ \t]*\n([ \t]*)', '\n\\2\\1 '),
- # pointer
- ##('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
- ('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|:|=|\?\)|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
- #to#('(bool|char|const|delete|int|stream|unsigned|void|([A-Z]\w*)|[,])[ \n\t]*(\*|&)[ \t]*', '\\1 \\3'),
- # pointer with template
- ('(( *((bool|char|const|delete|int|stream|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*|\w+::\w+|[,])[ \*&],*)+)>) *(\*|&) *', '\\1 \\5'),
- #to#('(( *((bool|char|delete|int|stream|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)|[,])[ \*&],*)+)>)[ \t\n]*(\*|&) *', '\\1 \\7'),
- # unary pointer, minus, not
- ('(return|=) (\*|&|-|!) ([\w\(])', '\\1 \\2\\3'),
- # space after `operator'
- ('(\Woperator) *([^\w\s])', '\\1 \\2'),
- # dangling brace close
- ('\n[ \t]*(\n[ \t]*})', '\\1'),
- # dangling newline
- ('\n[ \t]*\n[ \t]*\n', '\n\n'),
- # dangling parenthesis open
- #('[ \t]*\n[ \t]*\([ \t]*\n', '('),
- ('\([ \t]*\n', '('),
- # dangling parenthesis close
- ('\n[ \t]*\)', ')'),
- # dangling comma
- ('\n[ \t]*,', ','),
- # dangling semicolon
- ('\n[ \t]*;', ';'),
- # brace open
- ('(\w)[ \t]*([^\s]*){([ \t]*\n)', '\\1\\2\n{\n'),
- # brace open backslash
- ('(\w[^\n]*){[ \t]*\\\\\n', '\\1\\\n{\\\n'),
- # brace close
- ("}[ \t]*([^'\n]*\w[^\n\\\]*)\n", '}\n\\1\n'),
- # brace close backslash
- ("}[ \t]*([^'\n]*\w[^\n\\\]*)", '\n}\n\\1'),
- # delete space after `operator'
- #('(\Woperator) (\W)', '\\1\\2'),
- # delete space after case, label
- ('(\W(case|label) ([\w]+)) :', '\\1:'),
- # delete space before comma
- ('[ \t]*,', ','),
- # delete space before semicolon
- ('[ \t]*;', ';'),
- # delete space before eol-backslash
- ('[ \t]*\\\\\n', '\\\n'),
- # delete trailing whitespace
- ('[ \t]*\n', '\n'),
-
- ## Deuglify code that also gets ugly by rules above.
- # delete newline after typedef struct
- ('(typedef struct\s+([\w]*\s){([^}]|{[^}]*})*})\s*\n\s*(\w[\w\d]*;)', '\\1 \\4'),
- # delete spaces around template brackets
- #('(dynamic_cast|template|([A-Z]\w*))[ \t]*<[ \t]*(( *(bool|char|int|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)),?)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\3\\8>'),
- ('(dynamic_cast|template|typedef|\w+::\w+|[A-Z]\w*)[ \t]*<[ \t]*(( *(bool|char|const|int|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*)( *[\*&]?,|[\*&])*)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\2\\6>'),
- ('(\w+::\w+|[A-Z]\w*) < ((\w+::\w+|[A-Z]\w*)<[A-Z]\w*>) >', '\\1<\\2 >'),
- ('((if|while)\s+\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\n;'),
- ('(for\s+\(([^;]*;[^;]*;([^\)]|\([^\)]*\))*)\))\s*;', '\\1\n;'),
- # do {..} while
- ('(}\s*while\s*)(\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\\2;'),
-
- ## Fix code that gets broken by rules above.
- ##('->\s+\*', '->*'),
- # delete space before #define x()
- ('#[ \t]*define (\w*)[ \t]*\(', '#define \\1('),
- # add space in #define x ()
- ('#[ \t]*define (\w*)(\(([^\(\)]|\([^\(\)]*\))*\)\\n)',
- '#define \\1 \\2'),
- # delete space in #include <>
- ('#[ \t]*include[ \t]*<[ \t]*([^ \t>]*)[ \t]*(/?)[ \t]*([^ \t>]*)[ \t]*>',
- '#include <\\1\\2\\3>'),
- # delete backslash before empty line (emacs' indent region is broken)
- ('\\\\\n\n', '\n\n'),
- ],
-
- COMMENT:
- [
- # delete trailing whitespace
- ('[ \t]*\n', '\n'),
- # delete empty first lines
- ('(/\*\n)\n*', '\\1'),
- # delete empty last lines
- ('\n*(\n\*/)', '\\1'),
- ## delete newline after start?
- #('/(\*)\n', '\\1'),
- ## delete newline before end?
- #('\n(\*/)', '\\1'),
- ],
- }
-
-# Recognize special sequences in the input.
-#
-# (?P<name>regex) -- Assign result of REGEX to NAME.
-# *? -- Match non-greedily.
-# (?m) -- Multiline regex: Make ^ and $ match at each line.
-# (?s) -- Make the dot match all characters including newline.
-# (?x) -- Ignore whitespace in patterns.
-no_match = 'a\ba'
-snippet_res = {
- CXX: {
- 'multiline_comment':
- r'''(?sx)
- (?P<match>
- (?P<code>
- [ \t]*/\*.*?\*/))''',
-
- 'singleline_comment':
- r'''(?mx)
- ^.*
- (?P<match>
- (?P<code>
- [ \t]*//([ \t][^\n]*|)\n))''',
-
- 'string':
- r'''(?x)
- (?P<match>
- (?P<code>
- "([^\"\n](\")*)*"))''',
-
- 'char':
- r'''(?x)
- (?P<match>
- (?P<code>
- '([^']+|\')))''',
-
- 'include':
- r'''(?x)
- (?P<match>
- (?P<code>
- "#[ \t]*include[ \t]*<[^>]*>''',
- },
- }
-
-class Chunk:
- def replacement_text (self):
- return ''
-
- def filter_text (self):
- return self.replacement_text ()
-
-class Substring (Chunk):
- def __init__ (self, source, start, end):
- self.source = source
- self.start = start
- self.end = end
-
- def replacement_text (self):
- s = self.source[self.start:self.end]
- if verbose_p:
- sys.stderr.write ('CXX Rules')
- for i in rules[CXX]:
- if verbose_p:
- sys.stderr.write ('.')
- #sys.stderr.write ('\n\n***********\n')
- #sys.stderr.write (i[0])
- #sys.stderr.write ('\n***********\n')
- #sys.stderr.write ('\n=========>>\n')
- #sys.stderr.write (s)
- #sys.stderr.write ('\n<<=========\n')
- s = re.sub (i[0], i[1], s)
- if verbose_p:
- sys.stderr.write ('done\n')
- return s
-
-
-class Snippet (Chunk):
- def __init__ (self, type, match, format):
- self.type = type
- self.match = match
- self.hash = 0
- self.options = []
- self.format = format
-
- def replacement_text (self):
- return self.match.group ('match')
-
- def substring (self, s):
- return self.match.group (s)
-
- def __repr__ (self):
- return `self.__class__` + ' type = ' + self.type
-
-class Multiline_comment (Snippet):
- def __init__ (self, source, match, format):
- self.type = type
- self.match = match
- self.hash = 0
- self.options = []
- self.format = format
-
- def replacement_text (self):
- s = self.match.group ('match')
- if verbose_p:
- sys.stderr.write ('COMMENT Rules')
- for i in rules[COMMENT]:
- if verbose_p:
- sys.stderr.write ('.')
- s = re.sub (i[0], i[1], s)
- return s
-
-snippet_type_to_class = {
- 'multiline_comment': Multiline_comment,
-# 'string': Multiline_comment,
-# 'include': Include_snippet,
-}
-
-def find_toplevel_snippets (s, types):
- if verbose_p:
- sys.stderr.write ('Dissecting')
-
- res = {}
- for i in types:
- res[i] = re.compile (snippet_res[format][i])
-
- snippets = []
- index = 0
- ## found = dict (map (lambda x: (x, None),
- ## types))
- ## urg python2.1
- found = {}
- map (lambda x, f = found: f.setdefault (x, None),
- types)
-
- # We want to search for multiple regexes, without searching
- # the string multiple times for one regex.
- # Hence, we use earlier results to limit the string portion
- # where we search.
- # Since every part of the string is traversed at most once for
- # every type of snippet, this is linear.
-
- while 1:
- if verbose_p:
- sys.stderr.write ('.')
- first = None
- endex = 1 << 30
- for type in types:
- if not found[type] or found[type][0] < index:
- found[type] = None
- m = res[type].search (s[index:endex])
- if not m:
- continue
-
- cl = Snippet
- if snippet_type_to_class.has_key (type):
- cl = snippet_type_to_class[type]
- snip = cl (type, m, format)
- start = index + m.start ('match')
- found[type] = (start, snip)
-
- if found[type] \
- and (not first \
- or found[type][0] < found[first][0]):
- first = type
-
- # FIXME.
-
- # Limiting the search space is a cute
- # idea, but this *requires* to search
- # for possible containing blocks
- # first, at least as long as we do not
- # search for the start of blocks, but
- # always/directly for the entire
- # @block ... @end block.
-
- endex = found[first][0]
-
- if not first:
- snippets.append (Substring (s, index, len (s)))
- break
-
- (start, snip) = found[first]
- snippets.append (Substring (s, index, start))
- snippets.append (snip)
- found[first] = None
- index = start + len (snip.match.group ('match'))
-
- return snippets
-
-def nitpick_file (outdir, file):
- s = open (file).read ()
-
- for i in rules[GLOBAL_CXX]:
- s = re.sub (i[0], i[1], s)
-
- # FIXME: Containing blocks must be first, see
- # find_toplevel_snippets.
- # We leave simple strings be part of the code
- snippet_types = (
- 'multiline_comment',
- 'singleline_comment',
- 'string',
-# 'char',
- )
-
- chunks = find_toplevel_snippets (s, snippet_types)
- #code = filter (lambda x: is_derived_class (x.__class__, Substring),
- # chunks)
-
- t = string.join (map (lambda x: x.filter_text (), chunks), '')
- fixt = file
- if s != t:
- if not outdir:
- os.system ('mv %s %s~' % (file, file))
- else:
- fixt = os.path.join (outdir,
- os.path.basename (file))
- h = open (fixt, "w")
- h.write (t)
- h.close ()
- if s != t or indent_p:
- indent_file (fixt)
-
-def indent_file (file):
- emacs = '''emacs\
- --no-window-system\
- --batch\
- --no-site-file\
- --no-init-file\
- %(file)s\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "cc-mode")
- (c++-mode)
- (indent-region (point-min) (point-max))
- (if (buffer-modified-p (current-buffer))
- (save-buffer)))' ''' % vars ()
- emacsclient = '''emacsclient\
- --socket-name=%(socketdir)s/%(socketname)s\
- --no-wait\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "cc-mode")
- (find-file "%(file)s")
- (c++-mode)
- (indent-region (point-min) (point-max))
- (if (buffer-modified-p (current-buffer))
- (save-buffer)))' ''' \
- % { 'file': file,
- 'socketdir' : socketdir,
- 'socketname' : socketname, }
- if verbose_p:
- sys.stderr.write (emacs)
- sys.stderr.write ('\n')
- os.system (emacs)
-
-
-def usage ():
- sys.stdout.write (r'''
-Usage:
-fixcc [OPTION]... FILE...
-
-Options:
- --help
- --indent reindent, even if no changes
- --verbose
- --test
-
-Typical use with LilyPond:
-
- fixcc $(find flower kpath-guile lily -name '*cc' -o -name '*hh' | grep -v /out)
-
-This script is licensed under the GNU GPL
-''')
-
-def do_options ():
- global indent_p, outdir, verbose_p
- (options, files) = getopt.getopt (sys.argv[1:], '',
- ['help', 'indent', 'outdir=',
- 'test', 'verbose'])
- for (o, a) in options:
- if o == '--help':
- usage ()
- sys.exit (0)
- elif o == '--indent':
- indent_p = 1
- elif o == '--outdir':
- outdir = a
- elif o == '--verbose':
- verbose_p = 1
- elif o == '--test':
- test ()
- sys.exit (0)
- else:
- assert unimplemented
- if not files:
- usage ()
- sys.exit (2)
- return files
-
-
-outdir = 0
-format = CXX
-socketdir = '/tmp/fixcc'
-socketname = 'fixcc%d' % os.getpid ()
-
-def setup_client ():
- #--no-window-system\
- #--batch\
- os.unlink (os.path.join (socketdir, socketname))
- os.mkdir (socketdir, 0700)
- emacs='''emacs\
- --no-site-file\
- --no-init-file\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "server")
- (setq server-socket-dir "%(socketdir)s")
- (setq server-name "%(socketname)s")
- (server-start)
- (while t) (sleep 1000))' ''' \
- % { 'socketdir' : socketdir,
- 'socketname' : socketname, }
-
- if not os.fork ():
- os.system (emacs)
- sys.exit (0)
- while not os.path.exists (os.path.join (socketdir, socketname)):
- time.sleep (1)
-
-def main ():
- #emacsclient should be faster, but this does not work yet
- #setup_client ()
- files = do_options ()
- if outdir and not os.path.isdir (outdir):
- os.makedirs (outdir)
- for i in files:
- sys.stderr.write ('%s...\n' % i)
- nitpick_file (outdir, i)
-
-
-## TODO: make this compilable and check with g++
-TEST = '''
-#include <libio.h>
-#include <map>
-class
-ostream ;
-
-class Foo {
-public: static char* foo ();
-std::map<char*,int>* bar (char, char) { return 0; }
-};
-typedef struct
-{
- Foo **bar;
-} String;
-
-ostream &
-operator << (ostream & os, String d);
-
-typedef struct _t_ligature
-{
- char *succ, *lig;
- struct _t_ligature * next;
-} AFM_Ligature;
-
-typedef std::map < AFM_Ligature const *, int > Bar;
-
- /**
- (c) 1997--2008 Han-Wen Nienhuys <hanwen@cs.uu.nl>
- */
-
-/* ||
-* vv
-* !OK OK
-*/
-/* ||
- vv
- !OK OK
-*/
-char *
-Foo:: foo ()
-{
-int
-i
-;
- char* a= &++ i ;
- a [*++ a] = (char*) foe (*i, &bar) *
- 2;
- int operator double ();
- std::map<char*,int> y =*bar(-*a ,*b);
- Interval_t<T> & operator*= (T r);
- Foo<T>*c;
- int compare (Pqueue_ent < K, T > const& e1, Pqueue_ent < K,T> *e2);
- delete *p;
- if (abs (f)*2 > abs (d) *FUDGE)
- ;
- while (0);
- for (; i<x foo(); foo>bar);
- for (; *p && > y;
- foo > bar)
-;
- do {
- ;;;
- }
- while (foe);
-
- squiggle. extent;
- 1 && * unsmob_moment (lf);
- line_spanner_ = make_spanner ("DynamicLineSpanner", rq ? rq->*self_scm
-(): SCM_EOL);
- case foo: k;
-
- if (0) {a=b;} else {
- c=d;
- }
-
- cookie_io_functions_t Memory_out_stream::functions_ = {
- Memory_out_stream::reader,
- ...
- };
-
- int compare (Array < Pitch> *, Array < Pitch> *);
- original_ = (Grob *) & s;
- Drul_array< Link_array<Grob> > o;
-}
-
- header_.char_info_pos = (6 + header_length) * 4;
- return ly_bool2scm (*ma < * mb);
-
- 1 *::sign(2);
-
- (shift) *-d;
-
- a = 0 ? *x : *y;
-
-a = "foo() 2,2,4";
-{
- if (!span_)
- {
- span_ = make_spanner ("StaffSymbol", SCM_EOL);
- }
-}
-{
- if (!span_)
- {
- span_ = make_spanner (StaffSymbol, SCM_EOL);
- }
-}
-'''
-
-def test ():
- test_file = 'fixcc.cc'
- open (test_file, 'w').write (TEST)
- nitpick_file (outdir, test_file)
- sys.stdout.write (open (test_file).read ())
-
-if __name__ == '__main__':
- main ()
-
+++ /dev/null
-#!@PYTHON@
-import sys
-import getopt
-import re
-import os
-
-(options, files) = \
- getopt.getopt (sys.argv[1:],
- '',
- ['dir='])
-
-
-outdir = ''
-for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--dir':
- outdir = a
- else:
- print o
- raise getopt.error
-
-# Ugh
-for design_size in [11,13,14,16,18,20,23,26]:
- name = 'Emmentaler'
- filename = name.lower ()
- script = '''#!@FONTFORGE@
-
-New();
-
-# Separate Feta versioning?
-# * using 20 as Weight works for gnome-font-select widget: gfs
-
-notice = "";
-notice += "This font is distributed under the GNU General Public License. ";
-notice += "As a special exception, if you create a document which uses ";
-notice += "this font, and embed this font or unaltered portions of this ";
-notice += "font into the document, this font does not by itself cause the ";
-notice += "resulting document to be covered by the GNU General Public License.";;
-
-SetFontNames("%(name)s-%(design_size)d", "%(name)s", "%(name)s %(design_size)d", "%(design_size)d", notice, "@TOPLEVEL_VERSION@");
-
-MergeFonts("feta%(design_size)d.pfb");
-MergeFonts("parmesan%(design_size)d.pfb");
-
-# load nummer/din after setting PUA.
-i = 0;
-while (i < CharCnt())
- Select(i);
-# crashes fontforge, use PUA for now -- jcn
-# SetUnicodeValue(i + 0xF0000, 0);
-/*
-PRIVATE AREA
- In the BMP, the range 0xe000 to 0xf8ff will never be assigned to any
- characters by the standard and is reserved for private usage. For the
- Linux community, this private area has been subdivided further into the
- range 0xe000 to 0xefff which can be used individually by any end-user
- and the Linux zone in the range 0xf000 to 0xf8ff where extensions are
- coordinated among all Linux users. The registry of the characters
- assigned to the Linux zone is currently maintained by H. Peter Anvin
- <Peter.Anvin@linux.org>.
-*/
- SetUnicodeValue(i + 0xE000, 0);
- ++i;
-endloop
-
-
-MergeFonts("feta-alphabet%(design_size)d.pfb");
-MergeKern("feta-alphabet%(design_size)d.tfm");
-
-LoadTableFromFile("LILF", "%(filename)s-%(design_size)d.subfonts");
-LoadTableFromFile("LILC", "feta%(design_size)d.otf-table");
-LoadTableFromFile("LILY", "feta%(design_size)d.otf-gtable");
-
-Generate("%(filename)s-%(design_size)d.otf");
-Generate("%(filename)s-%(design_size)d.svg");
-''' % vars()
-
- basename = '%s-%d' % (filename, design_size)
- path = os.path.join (outdir, basename + '.pe')
- open (path, 'w').write (script)
-
- subfonts = ['feta%(design_size)d',
- 'parmesan%(design_size)d',
- 'feta-alphabet%(design_size)d']
-
- ns = []
- for s in subfonts:
- ns.append ('%s' % (s % vars()))
-
- subfonts_str = ' '.join (ns)
-
- open (os.path.join (outdir, '%(filename)s-%(design_size)d.subfonts' % vars()), 'w').write (subfonts_str)
-
- path = os.path.join (outdir, '%s-%d.dep' % (filename, design_size))
-
- deps = r'''%(filename)s-%(design_size)d.otf: $(outdir)/feta%(design_size)d.pfa \
- $(outdir)/parmesan%(design_size)d.pfa \
- $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-table \
- $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-gtable
-''' % vars()
- open (path, 'w').write (deps)
-
- open (os.path.join (outdir, basename + '.fontname'), 'w').write ("%s-%d" % (name, design_size))
+++ /dev/null
-#!@PYTHON@
-import os
-import sys
-import tempfile
-
-base = os.path.splitext (os.path.split (sys.argv[1])[1])[0]
-input = os.path.abspath (sys.argv[1])
-output = os.path.abspath (sys.argv[2])
-program_name= os.path.split (sys.argv[0])[1]
-
-dir = tempfile.mktemp (program_name)
-os.mkdir (dir, 0777)
-os.chdir(dir)
-
-def system (c):
- print c
- if os.system (c):
- raise 'barf'
-
-outputs = []
-for sz in [48,32,16] :
-
- for depth in [24,8]:
- out = '%(base)s-%(sz)d-%(depth)d.png' % locals()
- system ('convert -depth %(depth)d -sample %(sz)d %(input)s %(out)s' %
- locals ())
- outputs.append (out)
-
-system('icotool --output %s --create %s' % (output, ' '.join (outputs)))
-system('rm -rf %(dir)s' % locals())
-
+++ /dev/null
-#!/usr/bin/python
-
-import sys
-import time
-import os
-import re
-import optparse
-
-def read_pipe (x):
- print 'pipe', x
- return os.popen (x).read ()
-
-def system (x):
- print x
- return os.system (x)
-
-class PatchFailed(Exception):
- pass
-
-def sign (x):
- if x < 0:
- return -1
- if x > 0:
- return 1
-
- return 0
-
-
-class Commit:
- def __init__ (self, dict):
- for v in ('message',
- 'date',
- 'author',
- 'committish'):
- self.__dict__[v] = dict[v]
-
- self.date = ' '.join (self.date.split (' ')[:-1])
- self.date = time.strptime (self.date, '%a %b %d %H:%M:%S %Y')
-
- m = re.search ('(.*)<(.*)>', self.author)
- self.email = m.group (2).strip ()
- self.name = m.group (1).strip ()
- self.diff = read_pipe ('git show %s' % self.committish)
- def compare (self, other):
- return sign (time.mktime (self.date) - time.mktime (other.date))
-
-
- def check_diff_chunk (self, filename, chunk):
- removals = []
- def note_removal (m):
- removals.append (m.group (1))
-
- re.sub ('\n-([^\n]+)', note_removal, chunk)
-
- if removals == []:
- return True
- if not os.path.exists (filename):
- return False
-
- contents = open (filename).read ()
- for r in removals:
- if r not in contents:
- return False
-
- return True
-
- def check_diff (self):
- chunks = re.split ('\ndiff --git ', self.diff)
-
- ok = True
- for c in chunks:
- m = re.search ('^a/([^ ]+)', c)
- if not m:
- continue
-
- file = m.group (1)
-
- c = re.sub('\n--- [^\n]+', '', c)
- ok = ok and self.check_diff_chunk (file, c)
- if not ok:
- break
-
- return ok
-
- def touched_files (self):
- files = []
- def note_file (x):
- files.append (x.group (1))
- return ''
-
- re.sub ('\n--- a/([^\n]+)\n',
- note_file, self.diff)
- re.sub('\n--- /dev/null\n\\+\\+\\+ b/([^\n]+)',
- note_file, self.diff)
-
- return files
-
- def has_patch (self):
- return self.touched_files () <> []
-
- def apply (self, add_del_files):
- def note_add_file (x):
- add_del_files.append (('add', x.group (1)))
- return ''
-
- def note_del_file (x):
- add_del_files.append (('del', x.group (1)))
- return ''
-
- re.sub('\n--- /dev/null\n\\+\\+\\+ b/([^\n]+)',
- note_add_file, self.diff)
-
- re.sub('\n--- a/([^\n]+)\n\\+\\+\\+ /dev/null',
- note_del_file, self.diff)
-
- p = os.popen ('patch -f -p1 ', 'w')
- p.write (self.diff)
-
- if p.close ():
- raise PatchFailed, self.committish
-
-
-def parse_commit_log (log):
- committish = re.search ('^([^\n]+)', log).group (1)
- author = re.search ('\nAuthor:\s+([^\n]+)', log).group (1)
- date_match = re.search ('\nDate:\s+([^\n]+)', log)
- date = date_match.group (1)
- log = log[date_match.end (1):]
-
- message = re.sub ("\n *", '', log)
- message = message.strip ()
-
- c = Commit (locals ())
- return c
-
-def parse_add_changes (from_commit, max_count=0):
- opt = ''
- rest = '..'
- if max_count:
-
- # fixme.
- assert max_count == 1
- opt = '--max-count=%d' % max_count
- rest = ''
-
- log = read_pipe ('git log %(opt)s %(from_commit)s%(rest)s' % locals ())
-
- log = log[len ('commit '):]
- log = log.strip ()
-
- if not log:
- return []
-
- commits = map (parse_commit_log, re.split ('\ncommit ', log))
- commits.reverse ()
-
- return commits
-
-
-def header (commit):
- return '%d-%02d-%02d %s <%s>\n' % (commit.date[:3] + (commit.name, commit.email))
-
-def changelog_body (commit):
- s = ''
- s += ''.join ('\n* %s: ' % f for f in commit.touched_files())
- s += '\n' + commit.message
-
- s = s.replace ('\n', '\n\t')
- s += '\n'
- return s
-
-def main ():
- p = optparse.OptionParser (usage="usage git-update-changelog.py [options] [commits]",
- description="""
-Apply GIT patches and update change log.
-
-Run this file from the CVS directory, with commits from the repository in --git-dir.
-
-""")
- p.add_option ("--start",
- action='store',
- default='',
- metavar="FIRST",
- dest="start",
- help="all commits starting with FIRST (exclusive).")
-
- p.add_option ("--git-dir",
- action='store',
- default='',
- dest="gitdir",
- help="the GIT directory to merge.")
-
- (options, args) = p.parse_args ()
-
- log = open ('ChangeLog').read ()
-
- if options.gitdir:
- os.environ['GIT_DIR'] = options.gitdir
-
-
- if not args:
- if not options.start:
- print 'Must set start committish.'
- sys.exit (1)
-
- commits = parse_add_changes (options.start)
- else:
- commits = []
- for a in args:
- commits += parse_add_changes (a, max_count=1)
-
- if not commits:
- return
-
- new_log = ''
- last_commit = None
-
- first = header (commits[0]) + '\n'
- if first == log[:len (first)]:
- log = log[len (first):]
-
- try:
- previously_done = dict((c, 1) for c in open ('.git-commits-done').read ().split ('\n'))
- except IOError:
- previously_done = {}
-
- commits = [c for c in commits if not previously_done.has_key (c.committish)]
- commits = sorted (commits, cmp=Commit.compare)
-
- system ('cvs up')
-
- file_adddel = []
- collated_log = ''
- collated_message = ''
- commits_done = []
- while commits:
- c = commits[0]
-
- if not c.has_patch ():
- print 'patchless commit (merge?)'
- continue
-
- ok = c.check_diff ()
-
- if not ok:
- print "Patch doesn't seem to apply"
- print 'skipping', c.committish
- print 'message:', c.message
-
- break
-
-
- commits = commits[1:]
- commits_done.append (c)
-
- print 'patch ', c.committish
- try:
- c.apply (file_adddel)
- except PatchFailed:
- break
-
- if c.touched_files () == ['ChangeLog']:
- continue
-
- if (last_commit
- and c.author != last_commit.author
- and c.date[:3] != last_commit.date[:3]):
-
- new_log += header (last_commit)
-
- collated_log = changelog_body (c) + collated_log
- last_commit = c
-
- collated_message += c.message + '\n'
-
-
-
- for (op, f) in file_adddel:
- if op == 'del':
- system ('cvs remove %(f)s' % locals ())
- if op == 'add':
- system ('cvs add %(f)s' % locals ())
-
- if last_commit:
- collated_log = header (last_commit) + collated_log + '\n'
-
- log = collated_log + log
-
- try:
- os.unlink ('ChangeLog~')
- except OSError:
- pass
-
- os.rename ('ChangeLog', 'ChangeLog~')
- open ('ChangeLog', 'w').write (log)
-
- open ('.msg','w').write (collated_message)
- print '\nCommit message\n**\n%s\n**\n' % collated_message
- print '\nRun:\n\n\tcvs commit -F .msg\n\n'
- print '\n\techo %s >> .git-commits-done\n\n' % ' '.join ([c.committish
- for c in commits_done])
-
-
- if commits:
- print 'Commits left to do:'
- print ' '.join ([c.committish for c in commits])
-
-main ()
-
-
-
+++ /dev/null
-#! @BASH@
-# note: dash does not work
-
-pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*2007' '\1 2007--2008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change')
-pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*([^-]*--)(200[0-7])' '\1 \2\062008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change')
+++ /dev/null
-#!@PERL@ -w
-
-# Generate a short man page from --help and --version output.
-# Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software
-# Foundation, Inc.
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-# Written by Brendan O'Dea <bod@debian.org>
-# Available from ftp://ftp.gnu.org/gnu/help2man/
-
-use 5.005;
-use strict;
-use Getopt::Long;
-use Text::Tabs qw(expand);
-use POSIX qw(strftime setlocale LC_TIME);
-
-my $this_program = 'help2man';
-my $this_version = '1.28';
-my $version_info = <<EOT;
-GNU $this_program $this_version
-
-Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-Written by Brendan O'Dea <bod\@debian.org>
-EOT
-
-my $help_info = <<EOT;
-`$this_program' generates a man page out of `--help' and `--version' output.
-
-Usage: $this_program [OPTIONS]... EXECUTABLE
-
- -n, --name=STRING description for the NAME paragraph
- -s, --section=SECTION section number for manual page (1, 6, 8)
- -m, --manual=TEXT name of manual (User Commands, ...)
- -S, --source=TEXT source of program (FSF, Debian, ...)
- -i, --include=FILE include material from `FILE'
- -I, --opt-include=FILE include material from `FILE' if it exists
- -o, --output=FILE send output to `FILE'
- -p, --info-page=TEXT name of Texinfo manual
- -N, --no-info suppress pointer to Texinfo manual
- --help print this help, then exit
- --version print version number, then exit
-
-EXECUTABLE should accept `--help' and `--version' options although
-alternatives may be specified using:
-
- -h, --help-option=STRING help option string
- -v, --version-option=STRING version option string
-
-Report bugs to <bug-help2man\@gnu.org>.
-EOT
-
-my $section = 1;
-my $manual = '';
-my $source = '';
-my $help_option = '--help';
-my $version_option = '--version';
-my ($opt_name, @opt_include, $opt_output, $opt_info, $opt_no_info);
-
-my %opt_def = (
- 'n|name=s' => \$opt_name,
- 's|section=s' => \$section,
- 'm|manual=s' => \$manual,
- 'S|source=s' => \$source,
- 'i|include=s' => sub { push @opt_include, [ pop, 1 ] },
- 'I|opt-include=s' => sub { push @opt_include, [ pop, 0 ] },
- 'o|output=s' => \$opt_output,
- 'p|info-page=s' => \$opt_info,
- 'N|no-info' => \$opt_no_info,
- 'h|help-option=s' => \$help_option,
- 'v|version-option=s' => \$version_option,
-);
-
-# Parse options.
-Getopt::Long::config('bundling');
-GetOptions (%opt_def,
- help => sub { print $help_info; exit },
- version => sub { print $version_info; exit },
-) or die $help_info;
-
-die $help_info unless @ARGV == 1;
-
-my %include = ();
-my %append = ();
-my @include = (); # retain order given in include file
-
-# Process include file (if given). Format is:
-#
-# [section name]
-# verbatim text
-#
-# or
-#
-# /pattern/
-# verbatim text
-#
-
-while (@opt_include)
-{
- my ($inc, $required) = @{shift @opt_include};
-
- next unless -f $inc or $required;
- die "$this_program: can't open `$inc' ($!)\n"
- unless open INC, $inc;
-
- my $key;
- my $hash = \%include;
-
- while (<INC>)
- {
- # [section]
- if (/^\[([^]]+)\]/)
- {
- $key = uc $1;
- $key =~ s/^\s+//;
- $key =~ s/\s+$//;
- $hash = \%include;
- push @include, $key unless $include{$key};
- next;
- }
-
- # /pattern/
- if (m!^/(.*)/([ims]*)!)
- {
- my $pat = $2 ? "(?$2)$1" : $1;
-
- # Check pattern.
- eval { $key = qr($pat) };
- if ($@)
- {
- $@ =~ s/ at .*? line \d.*//;
- die "$inc:$.:$@";
- }
-
- $hash = \%append;
- next;
- }
-
- # Check for options before the first section--anything else is
- # silently ignored, allowing the first for comments and
- # revision info.
- unless ($key)
- {
- # handle options
- if (/^-/)
- {
- local @ARGV = split;
- GetOptions %opt_def;
- }
-
- next;
- }
-
- $hash->{$key} ||= '';
- $hash->{$key} .= $_;
- }
-
- close INC;
-
- die "$this_program: no valid information found in `$inc'\n"
- unless $key;
-}
-
-# Compress trailing blank lines.
-for my $hash (\(%include, %append))
-{
- for (keys %$hash) { $hash->{$_} =~ s/\n+$/\n/ }
-}
-
-# Turn off localisation of executable's output.
-@ENV{qw(LANGUAGE LANG LC_ALL)} = ('C') x 3;
-
-# Turn off localisation of date (for strftime).
-setlocale LC_TIME, 'C';
-
-# Grab help and version info from executable.
-my ($help_text, $version_text) = map {
- join '', map { s/ +$//; expand $_ } `$ARGV[0] $_ 2>/dev/null`
- or die "$this_program: can't get `$_' info from $ARGV[0]\n"
-} $help_option, $version_option;
-
-my $date = strftime "%B %Y", localtime;
-(my $program = $ARGV[0]) =~ s!.*/!!;
-my $package = $program;
-my $version;
-
-if ($opt_output)
-{
- unlink $opt_output
- or die "$this_program: can't unlink $opt_output ($!)\n"
- if -e $opt_output;
-
- open STDOUT, ">$opt_output"
- or die "$this_program: can't create $opt_output ($!)\n";
-}
-
-# The first line of the --version information is assumed to be in one
-# of the following formats:
-#
-# <version>
-# <program> <version>
-# {GNU,Free} <program> <version>
-# <program> ({GNU,Free} <package>) <version>
-# <program> - {GNU,Free} <package> <version>
-#
-# and seperated from any copyright/author details by a blank line.
-
-($_, $version_text) = split /\n+/, $version_text, 2;
-
-if (/^(\S+) +\(((?:GNU|Free) +[^)]+)\) +(.*)/ or
- /^(\S+) +- *((?:GNU|Free) +\S+) +(.*)/)
-{
- $program = $1;
- $package = $2;
- $version = $3;
-}
-elsif (/^((?:GNU|Free) +)?(\S+) +(.*)/)
-{
- $program = $2;
- $package = $1 ? "$1$2" : $2;
- $version = $3;
-}
-else
-{
- $version = $_;
-}
-
-$program =~ s!.*/!!;
-
-# No info for `info' itself.
-$opt_no_info = 1 if $program eq 'info';
-
-# --name overrides --include contents.
-$include{NAME} = "$program \\- $opt_name\n" if $opt_name;
-
-# Default (useless) NAME paragraph.
-$include{NAME} ||= "$program \\- manual page for $program $version\n";
-
-# Man pages traditionally have the page title in caps.
-my $PROGRAM = uc $program;
-
-# Set default page head/footers
-$source ||= "$program $version";
-unless ($manual)
-{
- for ($section)
- {
- if (/^(1[Mm]|8)/) { $manual = 'System Administration Utilities' }
- elsif (/^6/) { $manual = 'Games' }
- else { $manual = 'User Commands' }
- }
-}
-
-# Extract usage clause(s) [if any] for SYNOPSIS.
-if ($help_text =~ s/^Usage:( +(\S+))(.*)((?:\n(?: {6}\1| *or: +\S).*)*)//m)
-{
- my @syn = $2 . $3;
-
- if ($_ = $4)
- {
- s/^\n//;
- for (split /\n/) { s/^ *(or: +)?//; push @syn, $_ }
- }
-
- my $synopsis = '';
- for (@syn)
- {
- $synopsis .= ".br\n" if $synopsis;
- s!^\S*/!!;
- s/^(\S+) *//;
- $synopsis .= ".B $1\n";
- s/\s+$//;
- s/(([][]|\.\.+)+)/\\fR$1\\fI/g;
- s/^/\\fI/ unless s/^\\fR//;
- $_ .= '\fR';
- s/(\\fI)( *)/$2$1/g;
- s/\\fI\\fR//g;
- s/^\\fR//;
- s/\\fI$//;
- s/^\./\\&./;
-
- $synopsis .= "$_\n";
- }
-
- $include{SYNOPSIS} ||= $synopsis;
-}
-
-# Process text, initial section is DESCRIPTION.
-my $sect = 'DESCRIPTION';
-$_ = "$help_text\n\n$version_text";
-
-# Normalise paragraph breaks.
-s/^\n+//;
-s/\n*$/\n/;
-s/\n\n+/\n\n/g;
-
-# Temporarily exchange leading dots, apostrophes and backslashes for
-# tokens.
-s/^\./\x80/mg;
-s/^'/\x81/mg;
-s/\\/\x82/g;
-
-# Start a new paragraph (if required) for these.
-s/([^\n])\n(Report +bugs|Email +bug +reports +to|Written +by)/$1\n\n$2/g;
-
-sub convert_option;
-
-while (length)
-{
- # Convert some standard paragraph names.
- if (s/^(Options|Examples): *\n//)
- {
- $sect = uc $1;
- next;
- }
-
- # Copyright section
- if (/^Copyright +[(\xa9]/)
- {
- $sect = 'COPYRIGHT';
- $include{$sect} ||= '';
- $include{$sect} .= ".PP\n" if $include{$sect};
-
- my $copy;
- ($copy, $_) = split /\n\n/, $_, 2;
-
- for ($copy)
- {
- # Add back newline
- s/\n*$/\n/;
-
- # Convert iso9959-1 copyright symbol or (c) to nroff
- # character.
- s/^Copyright +(?:\xa9|\([Cc]\))/Copyright \\(co/mg;
-
- # Insert line breaks before additional copyright messages
- # and the disclaimer.
- s/(.)\n(Copyright |This +is +free +software)/$1\n.br\n$2/g;
-
- # Join hyphenated lines.
- s/([A-Za-z])-\n */$1/g;
- }
-
- $include{$sect} .= $copy;
- $_ ||= '';
- next;
- }
-
- # Catch bug report text.
- if (/^(Report +bugs|Email +bug +reports +to) /)
- {
- $sect = 'REPORTING BUGS';
- }
-
- # Author section.
- elsif (/^Written +by/)
- {
- $sect = 'AUTHOR';
- }
-
- # Examples, indicated by an indented leading $, % or > are
- # rendered in a constant width font.
- if (/^( +)([\$\%>] )\S/)
- {
- my $indent = $1;
- my $prefix = $2;
- my $break = '.IP';
- $include{$sect} ||= '';
- while (s/^$indent\Q$prefix\E(\S.*)\n*//)
- {
- $include{$sect} .= "$break\n\\f(CW$prefix$1\\fR\n";
- $break = '.br';
- }
-
- next;
- }
-
- my $matched = '';
- $include{$sect} ||= '';
-
- # Sub-sections have a trailing colon and the second line indented.
- if (s/^(\S.*:) *\n / /)
- {
- $matched .= $& if %append;
- $include{$sect} .= qq(.SS "$1"\n);
- }
-
- my $indent = 0;
- my $content = '';
-
- # Option with description.
- if (s/^( {1,10}([+-]\S.*?))(?:( +(?!-))|\n( {20,}))(\S.*)\n//)
- {
- $matched .= $& if %append;
- $indent = length ($4 || "$1$3");
- $content = ".TP\n\x83$2\n\x83$5\n";
- unless ($4)
- {
- # Indent may be different on second line.
- $indent = length $& if /^ {20,}/;
- }
- }
-
- # Option without description.
- elsif (s/^ {1,10}([+-]\S.*)\n//)
- {
- $matched .= $& if %append;
- $content = ".HP\n\x83$1\n";
- $indent = 80; # not continued
- }
-
- # Indented paragraph with tag.
- elsif (s/^( +(\S.*?) +)(\S.*)\n//)
- {
- $matched .= $& if %append;
- $indent = length $1;
- $content = ".TP\n\x83$2\n\x83$3\n";
- }
-
- # Indented paragraph.
- elsif (s/^( +)(\S.*)\n//)
- {
- $matched .= $& if %append;
- $indent = length $1;
- $content = ".IP\n\x83$2\n";
- }
-
- # Left justified paragraph.
- else
- {
- s/(.*)\n//;
- $matched .= $& if %append;
- $content = ".PP\n" if $include{$sect};
- $content .= "$1\n";
- }
-
- # Append continuations.
- while (s/^ {$indent}(\S.*)\n//)
- {
- $matched .= $& if %append;
- $content .= "\x83$1\n"
- }
-
- # Move to next paragraph.
- s/^\n+//;
-
- for ($content)
- {
- # Leading dot and apostrophe protection.
- s/\x83\./\x80/g;
- s/\x83'/\x81/g;
- s/\x83//g;
-
- # Convert options.
- s/(^| )(-[][\w=-]+)/$1 . convert_option $2/mge;
- }
-
- # Check if matched paragraph contains /pat/.
- if (%append)
- {
- for my $pat (keys %append)
- {
- if ($matched =~ $pat)
- {
- $content .= ".PP\n" unless $append{$pat} =~ /^\./;
- $content .= $append{$pat};
- }
- }
- }
-
- $include{$sect} .= $content;
-}
-
-# Refer to the real documentation.
-unless ($opt_no_info)
-{
- my $info_page = $opt_info || $program;
-
- $sect = 'SEE ALSO';
- $include{$sect} ||= '';
- $include{$sect} .= ".PP\n" if $include{$sect};
- $include{$sect} .= <<EOT;
-The full documentation for
-.B $program
-is maintained as a Texinfo manual. If the
-.B info
-and
-.B $program
-programs are properly installed at your site, the command
-.IP
-.B info $info_page
-.PP
-should give you access to the complete manual.
-EOT
-}
-
-# Output header.
-print <<EOT;
-.\\" DO NOT MODIFY THIS FILE! It was generated by $this_program $this_version.
-.TH $PROGRAM "$section" "$date" "$source" "$manual"
-EOT
-
-# Section ordering.
-my @pre = qw(NAME SYNOPSIS DESCRIPTION OPTIONS EXAMPLES);
-my @post = ('AUTHOR', 'REPORTING BUGS', 'COPYRIGHT', 'SEE ALSO');
-my $filter = join '|', @pre, @post;
-
-# Output content.
-for (@pre, (grep ! /^($filter)$/o, @include), @post)
-{
- if ($include{$_})
- {
- my $quote = /\W/ ? '"' : '';
- print ".SH $quote$_$quote\n";
-
- for ($include{$_})
- {
- # Replace leading dot, apostrophe and backslash tokens.
- s/\x80/\\&./g;
- s/\x81/\\&'/g;
- s/\x82/\\e/g;
- print;
- }
- }
-}
-
-exit;
-
-# Convert option dashes to \- to stop nroff from hyphenating 'em, and
-# embolden. Option arguments get italicised.
-sub convert_option
-{
- local $_ = '\fB' . shift;
-
- s/-/\\-/g;
- unless (s/\[=(.*)\]$/\\fR[=\\fI$1\\fR]/)
- {
- s/=(.)/\\fR=\\fI$1/;
- s/ (.)/ \\fI$1/;
- $_ .= '\fR';
- }
-
- $_;
-}
+++ /dev/null
-#!@PYTHON@
-# html-gettext.py
-
-# USAGE: html-gettext.py [-o OUTDIR] LANG FILES
-#
-# -o OUTDIR specifies that output files should be written in OUTDIR
-# rather than be overwritten
-#
-
-import sys
-import re
-import os
-import getopt
-
-import langdefs
-
-optlist, args = getopt.getopt(sys.argv[1:],'o:')
-lang = args[0]
-files = args [1:]
-
-outdir = '.'
-for x in optlist:
- if x[0] == '-o':
- outdir = x[1]
-
-double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
-my_gettext = langdefs.translation[lang]
-
-html_codes = ((' -- ', ' – '),
- (' --- ', ' — '),
- ("'", '’'))
-texi_html_conversion = {
- 'command': {
- 'html2texi':
- (re.compile (r'(?:<samp><span class="command">|<code>)(.*?)(?:</span></samp>|</code>)'),
- r'@command{\1}'),
- 'texi2html':
- (re.compile (r'@command{(.*?)}'),
- r'<code>\1</code>'),
- },
- 'code': {
- 'html2texi':
- (re.compile (r'<code>(.*?)</code>'),
- r'@code{\1}'),
- 'texi2html':
- (re.compile (r'@code{(.*?)}'),
- r'<code>\1</code>'),
- },
- }
-
-whitespaces = re.compile (r'\s+')
-
-
-def _ (s):
- if not s:
- return ''
- str = whitespaces.sub (' ', s)
- for c in html_codes:
- str = str.replace (c[1], c[0])
- for command in texi_html_conversion:
- d = texi_html_conversion[command]
- str = d['html2texi'][0].sub (d['html2texi'][1], str)
- str = my_gettext (str)
- str = d['texi2html'][0].sub (d['texi2html'][1], str)
- for c in html_codes:
- str = str.replace (c[0], c[1])
- return str
-
-link_re = re.compile (r'<link rel="(up|prev|next)" (.*?) title="([^"]*?)">')
-
-def link_gettext (m):
- return '<link rel="' + m.group (1) + '" ' + m.group (2) \
- + ' title="' + _ (m.group (3)) + '">'
-
-makeinfo_title_re = re.compile (r'<title>([^<]*?) - ([^<]*?)</title>')
-
-def makeinfo_title_gettext (m):
- return '<title>' + _ (m.group (1)) + ' - ' + m.group (2) + '</title>'
-
-texi2html_title_re = re.compile (r'<title>(.+): ([A-Z\d.]+ |)(.+?)</title>')
-
-def texi2html_title_gettext (m):
- return '<title>' + _ (m.group (1)) + double_punct_char_separator + ': ' \
- + m.group (2) + _ (m.group (3)) + '</title>'
-
-a_href_re = re.compile ('(?s)<a (?P<attributes>[^>]*?href="[\\w.#-_]+"[^>]*?>)(?P<code><code>)?\
-(?P<appendix>Appendix )?(?P<leading>[A-Z0-9.]+ | (?:<){1,2} | [^>:]+?: | |)\
-(?P<name>(?:<samp><span class="command">|</?code>|</span>|[^>])+?)(?P<end_code>(?(code)</code>|))\
-(?P<trailing> (?:>){1,2} | |)</a>:?')
-
-def a_href_gettext (m):
- s = ''
- if m.group(0)[-1] == ':':
- s = double_punct_char_separator + ':'
- t = ''
- if m.group ('appendix'):
- t = _ (m.group ('appendix'))
- return '<a ' + m.group ('attributes') + (m.group ('code') or '') + \
- t + m.group ('leading') + _ (m.group ('name')) + \
- m.group ('end_code') + m.group ('trailing') + '</a>' + s
-
-h_re = re.compile (r'<h(\d)( class="\w+"|)>\s*(Appendix |)([A-Z\d.]+ |)(.+?)\s*</h\1>')
-
-def h_gettext (m):
- if m.group (3):
- s = _ (m.group (3))
- else:
- s= ''
- return '<h' + m.group (1) + m.group (2) + '>' + s +\
- m.group (4) + _ (m.group (5)) + '</h' + m.group (1) + '>'
-
-for filename in files:
- f = open (filename, 'r')
- page = f.read ()
- f.close ()
- page = link_re.sub (link_gettext, page)
- page = makeinfo_title_re.sub (makeinfo_title_gettext, page)
- page = texi2html_title_re.sub (texi2html_title_gettext, page)
- page = a_href_re.sub (a_href_gettext, page)
- page = h_re.sub (h_gettext, page)
- for w in ('Next:', 'Previous:', 'Up:'):
- page = page.replace (w, _ (w))
- page = langdefs.LANGDICT[lang].html_filter (page)
- f = open (os.path.join (outdir, filename), 'w')
- f.write (page)
- f.close ()
+++ /dev/null
-#!@BASH@
-
-name=install-info-html
-version=1.0
-
-all=
-index_dir=.
-
-#
-# debugging
-#
-debug_echo=:
-
-
-#
-# print usage
-#
-help ()
-{
- cat << EOF
-$name $version
-Install HTML info document.
-
-Usage: $name [OPTIONS]... [DOCUMENT-DIR]...
-
-Options:
- -a, --all assume all subdirectories of index to be DOCUMENT-DIRs
- -d, --dir=DIR set index directory to DIR (default=.)
- -D, --debug print debugging info
- -h, --help show this help text
- -v, --version show version
-EOF
-}
-
-
-cleanup ()
-{
- $debug_echo "cleaning ($?)..."
-}
-
-trap cleanup 0 9 15
-
-#
-# Find command line options and switches
-#
-
-# "x:" x takes argument
-#
-options="adhvW:"
-#
-# ugh, "\-" is a hack to support long options
-# must be in double quotes for bash-2.0
-
-while getopts "\-:$options" O
-do
- $debug_echo "O: \`$O'"
- $debug_echo "arg: \`$OPTARG'"
- case $O in
- a)
- all=yes
- ;;
- D)
- [ "$debug_echo" = "echo" ] && set -x
- debug_echo=echo
- ;;
- h)
- help;
- exit 0
- ;;
- v)
- echo $name $version
- exit 0
- ;;
- d)
- index_dir=$OPTARG
- ;;
- # a long option!
- -)
- case "$OPTARG" in
- a*|-a*)
- all=yes
- ;;
- de*|-de*)
- [ "$debug_echo" = "echo" ] && set -x
- debug_echo=echo
- ;;
- h*|-h*)
- help;
- exit 0
- ;;
- di*|-di*)
- index_dir="`expr \"$OPTARG\" ':' '[^=]*=\(.*\)'`"
- ;;
- version|-version)
- echo $name $version
- exit 0
- ;;
- *|-*)
- echo "$0: invalid option -- \"$OPTARG\""
- help;
- exit -1
- ;;
- esac
- esac
-done
-shift `expr $OPTIND - 1`
-
-#
-# Input file name
-#
-if [ -z "$all" -a -z "$1" ]; then
- help
- echo "$name: No HTML documents given"
- exit 2
-fi
-
-if [ -n "$all" -a -n "$1" ]; then
- echo "$name: --all specified, ignoring DIRECTORY-DIRs"
-fi
-
-if [ -n "$all" ]; then
- document_dirs=`/bin/ls -d1 $index_dir`
-else
- document_dirs=$*
-fi
-
-index_file=$index_dir/index.html
-rm -f $index_file
-echo -n "$name: Writing index: $index_file..."
-
-# head
-cat >> $index_file <<EOF
-<html>
-<title>Info documentation index</title>
-<body>
-<h1>Info documentation index</h1>
-<p>
-This is the directory file \`index.html' a.k.a. \`DIR', which contains the
-topmost node of the HTML Info hierarchy.
-</p>
-<ul>
-EOF
-
-#list
-for i in $document_dirs; do
- cat <<EOF
-<li> <a href="$i/index.html">$i</a> (<a href="$i.html">$i as one big page</a>)</li>
-EOF
-done >> $index_file
-
-# foot
-cat >> $index_file <<EOF
-</ul>
-</body>
-</html>
-EOF
-echo
+++ /dev/null
-#!@PYTHON@
-
-# Created 01 September 2003 by Heikki Junes.
-# Rewritten by John Mandereau
-
-# Generates lilypond-words.el for (X)Emacs and lilypond-words[.vim] for Vim.
-
-import re
-import sys
-import os
-import getopt
-
-keywords = []
-reserved_words = []
-note_names = []
-
-# keywords not otherwise found
-keywords += ['include', 'maininput', 'version']
-
-# the main keywords
-s = open ('lily/lily-lexer.cc', 'r').read ()
-keywords += [w for w in re.findall (r"\s*{\"(.+)\",\s*.*},\s*\n", s)]
-
-s = open ('scm/markup.scm', 'r').read ()
-keywords += [w for w in re.findall (r"(?m)^\s*\(cons\s*([a-z-]+)-markup", s)]
-
-# identifiers and keywords
-for name in ['ly/chord-modifiers-init.ly',
- 'ly/dynamic-scripts-init.ly',
- 'ly/engraver-init.ly',
- 'ly/grace-init.ly',
- 'ly/gregorian.ly',
- 'ly/music-functions-init.ly',
- 'ly/performer-init.ly',
- 'ly/property-init.ly',
- 'ly/scale-definitions-init.ly',
- 'ly/script-init.ly',
- 'ly/spanners-init.ly',
- 'ly/declarations-init.ly',
- 'ly/params-init.ly']:
- s = open (name, 'r').read ()
- keywords += [w for w in re.findall (r"(?m)^\s*\"?([a-zA-Z]+)\"?\s*=", s)]
-
-# note names
-for name in ['ly/catalan.ly',
- 'ly/deutsch.ly',
- 'ly/drumpitch-init.ly',
- 'ly/english.ly',
- 'ly/espanol.ly',
- 'ly/italiano.ly',
- 'ly/nederlands.ly',
- 'ly/norsk.ly',
- 'ly/portugues.ly',
- 'ly/suomi.ly',
- 'ly/svenska.ly',
- 'ly/vlaams.ly']:
- s = open (name, 'r').read ()
- note_names += [n for n in re.findall (r"(?m)^\s*\(([a-z]+)[^l]+ly:make-pitch", s)]
-
-# reserved words
-for name in ['ly/engraver-init.ly',
- 'ly/performer-init.ly']:
- s = open (name, 'r').read ()
- for pattern in [r"(?m)^\s*.consists\s+\"([a-zA-Z_]+)\"",
- r"[\\]name\s+[\"]?([a-zA-Z_]+)[\"]?",
- r"\s+([a-zA-Z_]+)\s*\\(?:set|override)"]:
- reserved_words += [w for w in re.findall (pattern, s)]
-
-keywords = list (set (keywords))
-keywords.sort (reverse=True)
-
-reserved_words = list (set (reserved_words))
-reserved_words.sort (reverse=True)
-
-note_names = list (set (note_names))
-note_names.sort (reverse=True)
-
-
-# output
-outdir = ''
-out_words = False
-out_el = False
-out_vim = False
-
-options = getopt.getopt (sys.argv[1:],
- '', ['words', 'el', 'vim', 'dir='])[0]
-
-for (o, a) in options:
- if o == '--words':
- out_words = True
- elif o == '--el':
- out_el = True
- elif o == '--vim':
- out_vim = True
- elif o == '--dir':
- outdir = a
-
-if out_words or out_el:
- outstring = ''.join (['\\\\' + w + '\n' for w in keywords])
- outstring += ''.join ([w + '\n' for w in reserved_words])
- outstring += ''.join ([w + '\n' for w in note_names])
-
-if out_words:
- f = open (os.path.join (outdir, 'lilypond-words'), 'w')
- f.write (outstring)
-
-if out_el:
- f = open (os.path.join (outdir, 'lilypond-words.el'), 'w')
- f.write (outstring)
-
- # the menu in lilypond-mode.el
- # for easier typing of this list, replace '/' with '\' below
- # when writing to file
- elisp_menu = ['/( - _ /) -',
- '/[ - _ /] -',
- '< - _ > -',
- '<< - _ >> -',
- '///( - _ ///) -',
- '///[ - _ ///] -',
- '///< - _ ///! -',
- '///> - _ ///! -',
- '//center - / << _ >> -',
- '//column - / << _ >> -',
- '//context/ Staff/ = - % { _ } -',
- '//context/ Voice/ = - % { _ } -',
- '//markup - { _ } -',
- '//notes - { _ } -',
- '//relative - % { _ } -',
- '//score - { //n /? //simultaneous { //n _ //n } /! //n //paper { } //n /? //midi { } //n /! } //n -',
- '//simultaneous - { _ } -',
- '//sustainDown - _ //sustainUp -',
- '//times - % { _ } -',
- '//transpose - % { _ } -',
- '']
- f.write ('\n'.join ([line.replace ('/', '\\') for line in elisp_menu]))
-
-if out_vim:
- f = open (os.path.join (outdir, 'lilypond-words.vim'), 'w')
- f.write ('syn match lilyKeyword \"[-_^]\\?\\\\\\(')
- f.write (''.join ([w + '\\|' for w in keywords]))
- f.write ('n\\)\\(\\A\\|\\n\\)\"me=e-1\n')
-
- f.write ('syn match lilyReservedWord \"\\(\\A\\|\\n\\)\\(')
- f.write (''.join ([w + '\\|' for w in reserved_words]))
- f.write ('Score\\)\\(\\A\\|\\n\\)\"ms=s+1,me=e-1\n')
-
- f.write ('syn match lilyNote \"\\<\\(\\(\\(')
- f.write (''.join ([w + '\\|' for w in note_names]))
- f.write ('a\\)\\([,\']\\)\\{,4}\\([?!]\\)\\?\\)\\|s\\|r\\)\\(\\(128\\|64\\|32\\|16\\|8\\|4\\|2\\|1\\|\\\\breve\\|\\\\longa\\|\\\\maxima\\)[.]\\{,8}\\)\\?\\(\\A\\|\\n\\)\"me=e-1\n')
+++ /dev/null
-#!@PYTHON@
-
-
-'''
-TODO:
-
- * Add @nodes, split at sections?
-
-'''
-
-
-import sys
-import os
-import getopt
-import re
-
-program_name = 'lys-to-tely'
-
-include_snippets = '@lysnippets'
-fragment_options = 'printfilename,texidoc'
-help_text = r"""Usage: %(program_name)s [OPTIONS]... LY-FILE...
-Construct tely doc from LY-FILEs.
-
-Options:
- -h, --help print this help
- -f, --fragment-options=OPTIONS use OPTIONS as lilypond-book fragment
- options
- -o, --output=NAME write tely doc to NAME
- -t, --title=TITLE set tely doc title TITLE
- --template=TEMPLATE use TEMPLATE as Texinfo template file,
- instead of standard template; TEMPLATE should contain a command
- '%(include_snippets)s' to tell where to insert LY-FILEs. When this
- option is used, NAME and TITLE are ignored.
-"""
-
-def help (text):
- sys.stdout.write ( text)
- sys.exit (0)
-
-(options, files) = getopt.getopt (sys.argv[1:], 'f:hn:t:',
- ['fragment-options=', 'help', 'name=', 'title=', 'template='])
-
-name = "ly-doc"
-title = "Ly Doc"
-template = '''\input texinfo
-@setfilename %%(name)s.info
-@settitle %%(title)s
-
-@documentencoding utf-8
-@iftex
-@afourpaper
-@end iftex
-
-@finalout @c we do not want black boxes.
-
-@c fool ls-latex
-@ignore
-@author Han-Wen Nienhuys and Jan Nieuwenhuizen
-@title %%(title)s
-@end ignore
-
-@node Top, , , (dir)
-@top %%(title)s
-
-%s
-
-@bye
-''' % include_snippets
-
-for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '-h' or o == '--help':
- # We can't use vars () inside a function, as that only contains all
- # local variables and none of the global variables! Thus we have to
- # generate the help text here and pass it to the function...
- help (help_text % vars ())
- elif o == '-n' or o == '--name':
- name = a
- elif o == '-t' or o == '--title':
- title = a
- elif o == '-f' or o == '--fragment-options':
- fragment_options = a
- elif o == '--template':
- template = open (a, 'r').read ()
- else:
- raise Exception ('unknown option: ' + o)
-
-texi_file_re = re.compile ('.*\.i?te(ly|xi)$')
-
-def name2line (n):
- if texi_file_re.match (n):
- # We have a texi include file, simply include it:
- s = r"@include %s" % os.path.basename (n)
- else:
- # Assume it's a lilypond file -> create image etc.
- s = r"""
-@ifhtml
-@html
-<a name="%s"></a>
-@end html
-@end ifhtml
-
-@lilypondfile[%s]{%s}
-""" % (os.path.basename (n), fragment_options, n)
- return s
-
-if files:
- dir = os.path.dirname (name) or "."
-# don't strip .tely extension, input/lsr uses .itely
- name = os.path.basename (name)
- template = template % vars ()
-
- s = "\n".join (map (name2line, files))
- s = template.replace (include_snippets, s, 1)
- f = "%s/%s" % (dir, name)
- sys.stderr.write ("%s: writing %s..." % (program_name, f))
- h = open (f, "w")
- h.write (s)
- h.close ()
- sys.stderr.write ('\n')
-else:
- # not Unix philosophy, but hey, at least we notice when
- # we don't distribute any .ly files.
- sys.stderr.write ("No files specified. Doing nothing")
+++ /dev/null
-#!/usr/bin/env python
-
-import sys
-import os
-import glob
-import re
-
-USAGE = ''' Usage: makelsr.py LSR_SNIPPETS_DIR
-This script must be run from top of the source tree;
-it updates snippets input/lsr with snippets in input/new or LSR_SNIPPETS_DIR.
-'''
-
-LY_HEADER_LSR = '''%% Do not edit this file; it is auto-generated from LSR http://lsr.dsi.unimi.it
-%% This file is in the public domain.
-'''
-
-LY_HEADER_NEW = '''%% Do not edit this file; it is auto-generated from input/new
-%% This file is in the public domain.
-'''
-
-DEST = os.path.join ('input', 'lsr')
-NEW_LYS = os.path.join ('input', 'new')
-TEXIDOCS = os.path.join ('input', 'texidocs')
-
-TAGS = []
-# NR 1
-TAGS.extend (['pitches', 'rhythms', 'expressive-marks',
-'repeats', 'simultaneous-notes', 'staff-notation',
-'editorial-annotations', 'text'])
-# NR 2
-TAGS.extend (['vocal-music', 'chords', 'keyboards',
-'percussion', 'fretted-strings', 'unfretted-strings',
-'ancient-notation', 'winds', 'world-music'
-])
-
-# other
-TAGS.extend (['contexts-and-engravers', 'tweaks-and-overrides',
-'paper-and-layout', 'breaks', 'spacing', 'midi', 'titles', 'template'])
-
-def exit_with_usage (n=0):
- sys.stderr.write (USAGE)
- sys.exit (n)
-
-try:
- in_dir = sys.argv[1]
-except:
- exit_with_usage (2)
-
-if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)):
- exit_with_usage (3)
-
-unsafe = []
-unconverted = []
-notags_files = []
-
-# mark the section that will be printed verbatim by lilypond-book
-end_header_re = re.compile ('(\\header {.+?doctitle = ".+?})\n', re.M | re.S)
-
-def mark_verbatim_section (ly_code):
- return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1)
-
-# '% LSR' comments are to be stripped
-lsr_comment_re = re.compile (r'\s*%+\s*LSR.*')
-
-begin_header_re = re.compile (r'\\header\s*{', re.M)
-
-# add tags to ly files from LSR
-def add_tags (ly_code, tags):
- return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n', ly_code, 1)
-
-def copy_ly (srcdir, name, tags):
- global unsafe
- global unconverted
- dest = os.path.join (DEST, name)
- tags = ', '.join (tags)
- s = open (os.path.join (srcdir, name)).read ()
-
- texidoc_translations_path = os.path.join (TEXIDOCS,
- os.path.splitext (name)[0] + '.texidoc')
- if os.path.exists (texidoc_translations_path):
- texidoc_translations = open (texidoc_translations_path).read ()
- # Since we want to insert the translations verbatim using a
- # regexp, \\ is understood as ONE escaped backslash. So we have
- # to escape those backslashes once more...
- texidoc_translations = texidoc_translations.replace ('\\', '\\\\')
- s = begin_header_re.sub ('\\g<0>\n' + texidoc_translations, s, 1)
-
- if in_dir in srcdir:
- s = LY_HEADER_LSR + add_tags (s, tags)
- else:
- s = LY_HEADER_NEW + s
-
- s = mark_verbatim_section (s)
- s = lsr_comment_re.sub ('', s)
- open (dest, 'w').write (s)
-
- e = os.system ("convert-ly -e '%s'" % dest)
- if e:
- unconverted.append (dest)
- if os.path.exists (dest + '~'):
- os.remove (dest + '~')
- # -V seems to make unsafe snippets fail nicer/sooner
- e = os.system ("lilypond -V -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" % dest)
- if e:
- unsafe.append (dest)
-
-def read_source_with_dirs (src):
- s = {}
- l = {}
- for tag in TAGS:
- srcdir = os.path.join (src, tag)
- l[tag] = set (map (os.path.basename, glob.glob (os.path.join (srcdir, '*.ly'))))
- for f in l[tag]:
- if f in s:
- s[f][1].append (tag)
- else:
- s[f] = (srcdir, [tag])
- return s, l
-
-
-tags_re = re.compile ('lsrtags\\s*=\\s*"(.+?)"')
-
-def read_source (src):
- s = {}
- l = dict ([(tag, set()) for tag in TAGS])
- for f in glob.glob (os.path.join (src, '*.ly')):
- basename = os.path.basename (f)
- m = tags_re.search (open (f, 'r').read ())
- if m:
- file_tags = [tag.strip() for tag in m.group (1). split(',')]
- s[basename] = (src, file_tags)
- [l[tag].add (basename) for tag in file_tags if tag in TAGS]
- else:
- notags_files.append (f)
- return s, l
-
-
-def dump_file_list (file, list):
- f = open (file, 'w')
- f.write ('\n'.join (list) + '\n')
-
-## clean out existing lys and generated files
-map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
- glob.glob (os.path.join (DEST, '*.snippet-list')))
-
-# read LSR source where tags are defined by subdirs
-snippets, tag_lists = read_source_with_dirs (in_dir)
-# read input/new where tags are directly
-s, l = read_source (NEW_LYS)
-snippets.update (s)
-for t in TAGS:
- tag_lists[t].update (l[t])
-
-for (name, (srcdir, tags)) in snippets.items ():
- copy_ly (srcdir, name, tags)
-
-for (tag, file_set) in tag_lists.items ():
- dump_file_list (os.path.join (DEST, tag + '.snippet-list'), sorted(file_set))
-
-if unconverted:
- sys.stderr.write ('These files could not be converted successfully by convert-ly:\n')
- sys.stderr.write ('\n'.join (unconverted) + '\n\n')
-
-if notags_files:
- sys.stderr.write ('No tags could be found in these files:\n')
- sys.stderr.write ('\n'.join (notags_files) + '\n\n')
-
-dump_file_list ('lsr-unsafe.txt', unsafe)
-sys.stderr.write ('''
-
-Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY!
- git add input/lsr/*.ly
- xargs git-diff HEAD < lsr-unsafe.txt
-
-''')
-
+++ /dev/null
-#!/usr/bin/python
-
-# This module is imported by check_texi_refs.py
-
-references_dict = {
- 'lilypond': 'ruser',
- 'lilypond-learning': 'rlearning',
- 'lilypond-program': 'rprogram',
- 'lilypond-snippets': 'rlsr',
- 'music-glossary': 'rglos',
- 'lilypond-internals': 'rinternals' }
+++ /dev/null
-#!@PYTHON@
-# mass-link.py
-
-# USAGE: mass-link.py [--prepend-suffix SUFFIX] symbolic | hard SOURCEDIR DESTDIR FILES
-#
-# create hard or symbolic links to SOURCEDIR/FILES in DESTDIR
-#
-# If --prepend-suffix is specified, link to foo.bar will be called fooSUFFIX.bar.
-# Shell wildcards expansion is performed on FILES.
-
-import sys
-import os
-import glob
-import getopt
-
-print "mass-link.py"
-
-optlist, args = getopt.getopt (sys.argv[1:], '', ['prepend-suffix='])
-link_type, source_dir, dest_dir = args[0:3]
-files = args[3:]
-
-source_dir = os.path.normpath (source_dir)
-dest_dir = os.path.normpath (dest_dir)
-
-prepended_suffix = ''
-for x in optlist:
- if x[0] == '--prepend-suffix':
- prepended_suffix = x[1]
-
-if prepended_suffix:
- def insert_suffix (p):
- l = p.split ('.')
- if len (l) >= 2:
- l[-2] += prepended_suffix
- return '.'.join (l)
- return p + prepended_suffix
-else:
- insert_suffix = lambda p: p
-
-if link_type == 'symbolic':
- link = os.symlink
-elif link_type == 'hard':
- link = os.link
-else:
- sys.stderr.write(sys.argv[0] + ': ' + link_type + ": wrong argument, expected 'symbolic' or 'hard'\n")
- sys.exit (1)
-
-sourcefiles = []
-for pattern in files:
- sourcefiles += (glob.glob (os.path.join (source_dir, pattern)))
-
-def relative_path (f):
- if source_dir == '.':
- return f
- return f[len (source_dir) + 1:]
-
-destfiles = [os.path.join (dest_dir, insert_suffix (relative_path (f))) for f in sourcefiles]
-
-destdirs = set ([os.path.dirname (dest) for dest in destfiles])
-[os.makedirs (d) for d in destdirs if not os.path.exists (d)]
-
-def force_link (src,dest):
- if os.path.exists (dest):
- os.system ('rm -f ' + dest)
- link (src, dest)
-
-map (force_link, sourcefiles, destfiles)
+++ /dev/null
-#!@PYTHON@
-
-# mf-to-table.py -- convert spacing info in MF logs .
-#
-# source file of the GNU LilyPond music typesetter
-#
-# (c) 1997--2008 Han-Wen Nienhuys <hanwen@cs.uu.nl>
-
-import os
-import sys
-import getopt
-import re
-import time
-
-def read_log_file (fn):
- str = open (fn).read ()
- str = re.sub ('\n', '', str)
- str = re.sub ('[\t ]+', ' ', str)
-
- deps = []
- autolines = []
- def include_func (match, d = deps):
- d.append (match.group (1))
- return ''
-
- def auto_func (match, a = autolines):
- a.append (match.group (1))
- return ''
-
- str = re.sub ('\\(([/a-z.A-Z_0-9-]+\\.mf)', include_func, str)
- str = re.sub ('@{(.*?)@}', auto_func, str)
-
- return (autolines, deps)
-
-
-class Char_metric:
- def __init__ (self):
- pass
-
-font_family = 'feta'
-
-def parse_logfile (fn):
- autolines, deps = read_log_file (fn)
- charmetrics = []
-
- global_info = {
- 'filename' : os.path.splitext (os.path.basename (fn))[0]
- }
- group = ''
-
- for l in autolines:
- tags = l.split ('@:')
- if tags[0] == 'group':
- group = tags[1]
- elif tags[0] == 'puorg':
- group = ''
- elif tags[0] == 'char':
- name = tags[9]
-
- if group:
- name = group + '.' + name
- m = {
- 'description': tags[1],
- 'name': name,
- 'code': int (tags[2]),
- 'breapth': float (tags[3]),
- 'width': float (tags[4]),
- 'depth': float (tags[5]),
- 'height': float (tags[6]),
- 'wx': float (tags[7]),
- 'wy': float (tags[8]),
- }
- charmetrics.append (m)
- elif tags[0] == 'font':
- global font_family
- font_family = (tags[3])
- # To omit 'GNU' (foundry) from font name proper:
- # name = tags[2:]
- #urg
- if 0: # testing
- tags.append ('Regular')
-
- encoding = re.sub (' ','-', tags[5])
- tags = tags[:-1]
- name = tags[1:]
- global_info['design_size'] = float (tags[4])
- global_info['FontName'] = '-'.join (name)
- global_info['FullName'] = ' '.join (name)
- global_info['FamilyName'] = '-'.join (name[1:-1])
- if 1:
- global_info['Weight'] = tags[4]
- else: # testing
- global_info['Weight'] = tags[-1]
-
- global_info['FontBBox'] = '0 0 1000 1000'
- global_info['Ascender'] = '0'
- global_info['Descender'] = '0'
- global_info['EncodingScheme'] = encoding
-
- elif tags[0] == 'parameter':
- global_info[tags[1]] = tags[2];
-
- return (global_info, charmetrics, deps)
-
-
-
-def character_lisp_table (global_info, charmetrics):
-
- def conv_char_metric (charmetric):
- f = 1.0
- s = """(%s .
-((bbox . (%f %f %f %f))
-(subfont . "%s")
-(subfont-index . %d)
-(attachment . (%f . %f))))
-""" %(charmetric['name'],
- -charmetric['breapth'] * f,
- -charmetric['depth'] * f,
- charmetric['width'] * f,
- charmetric['height'] * f,
- global_info['filename'],
- charmetric['code'],
- charmetric['wx'],
- charmetric['wy'])
-
- return s
-
- s = ''
- for c in charmetrics:
- s += conv_char_metric (c)
-
- return s
-
-
-def global_lisp_table (global_info):
- str = ''
-
- keys = ['staffsize', 'stafflinethickness', 'staff_space',
- 'linethickness', 'black_notehead_width', 'ledgerlinethickness',
- 'design_size',
- 'blot_diameter'
- ]
- for k in keys:
- if global_info.has_key (k):
- str = str + "(%s . %s)\n" % (k,global_info[k])
-
- return str
-
-
-def ps_encoding (name, global_info, charmetrics):
- encs = ['.notdef'] * 256
- for m in charmetrics:
- encs[m['code']] = m['name']
-
-
- s = ('/%s [\n' % name)
- for m in range (0, 256):
- s += (' /%s %% %d\n' % (encs[m], m))
- s += ('] def\n')
- return s
-
-def get_deps (deps, targets):
- s = ''
- for t in targets:
- t = re.sub ( '^\\./', '', t)
- s += ('%s '% t)
- s += (": ")
- for d in deps:
- s += ('%s ' % d)
- s += ('\n')
- return s
-
-def help ():
- sys.stdout.write(r"""Usage: mf-to-table [OPTIONS] LOGFILEs
-
-Generate feta metrics table from preparated feta log.
-
-Options:
- -d, --dep=FILE print dependency info to FILE
- -h, --help print this help
- -l, --ly=FILE name output table
- -o, --outdir=DIR prefix for dependency info
- -p, --package=DIR specify package
-
- """)
- sys.exit (0)
-
-
-(options, files) = \
- getopt.getopt (sys.argv[1:],
- 'a:d:ho:p:t:',
- ['enc=', 'outdir=', 'dep=', 'lisp=',
- 'global-lisp=',
- 'debug', 'help', 'package='])
-
-global_lisp_nm = ''
-char_lisp_nm = ''
-enc_nm = ''
-depfile_nm = ''
-lyfile_nm = ''
-outdir_prefix = '.'
-
-for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--dep' or o == '-d':
- depfile_nm = a
- elif o == '--outdir' or o == '-o':
- outdir_prefix = a
- elif o == '--lisp':
- char_lisp_nm = a
- elif o == '--global-lisp':
- global_lisp_nm = a
- elif o == '--enc':
- enc_nm = a
- elif o== '--help' or o == '-h':
- help()
- elif o == '--debug':
- debug_b = 1
- else:
- print o
- raise getopt.error
-
-base = os.path.splitext (lyfile_nm)[0]
-
-for filenm in files:
- (g, m, deps) = parse_logfile (filenm)
-
- enc_name = 'FetaEncoding'
- if re.search ('parmesan', filenm):
- enc_name = 'ParmesanEncoding'
- elif re.search ('feta-brace', filenm):
- enc_name = 'FetaBraceEncoding'
- elif re.search ('feta-alphabet', filenm):
- enc_name = 'FetaAlphabetEncoding';
-
- open (enc_nm, 'w').write (ps_encoding (enc_name, g, m))
- open (char_lisp_nm, 'w').write (character_lisp_table (g, m))
- open (global_lisp_nm, 'w').write (global_lisp_table (g))
- if depfile_nm:
- open (depfile_nm, 'wb').write (get_deps (deps,
- [base + '.log', base + '.dvi', base + '.pfa',
- depfile_nm,
- base + '.pfb']))
+++ /dev/null
-#! /usr/bin/perl
-
-##################################################
-# Convert stylized Metafont to PostScript Type 1 #
-# By Scott Pakin <scott+mf@pakin.org> #
-##################################################
-
-########################################################################
-# mf2pt1 #
-# Copyright (C) 2008 Scott Pakin #
-# #
-# This program may be distributed and/or modified under the conditions #
-# of the LaTeX Project Public License, either version 1.3c of this #
-# license or (at your option) any later version. #
-# #
-# The latest version of this license is in: #
-# #
-# http://www.latex-project.org/lppl.txt #
-# #
-# and version 1.3c or later is part of all distributions of LaTeX #
-# version 2006/05/20 or later. #
-########################################################################
-
-our $VERSION = "2.4.4"; # mf2pt1 version number
-require 5.6.1; # I haven't tested mf2pt1 with older Perl versions
-
-use File::Basename;
-use File::Spec;
-use Getopt::Long;
-use Pod::Usage;
-use Math::Trig;
-use warnings;
-use strict;
-
-# Define some common encoding vectors.
-my @standardencoding =
- ((map {"_a$_"} (0..31)),
- qw (space exclam quotedbl numbersign dollar percent ampersand
- quoteright parenleft parenright asterisk plus comma hyphen
- period slash zero one two three four five six seven eight
- nine colon semicolon less equal greater question at A B C D E
- F G H I J K L M N O P Q R S T U V W X Y Z bracketleft
- backslash bracketright asciicircum underscore quoteleft a b c
- d e f g h i j k l m n o p q r s t u v w x y z braceleft bar
- braceright asciitilde),
- (map {"_a$_"} (127..160)),
- qw (exclamdown cent sterling fraction yen florin section currency
- quotesingle quotedblleft guillemotleft guilsinglleft
- guilsinglright fi fl _a176 endash dagger daggerdbl
- periodcentered _a181 paragraph bullet quotesinglbase
- quotedblbase quotedblright guillemotright ellipsis
- perthousand _a190 questiondown _a192 grave acute circumflex
- tilde macron breve dotaccent dieresis _a201 ring cedilla
- _a204 hungarumlaut ogonek caron emdash),
- (map {"_a$_"} (209..224)),
- qw (AE _a226 ordfeminine _a228 _a229 _a230 _a231 Lslash Oslash OE
- ordmasculine _a236 _a237 _a238 _a239 _a240 ae _a242 _a243
- _a244 dotlessi _a246 _a247 lslash oslash oe germandbls _a252
- _a253 _a254 _a255));
-my @isolatin1encoding =
- ((map {"_a$_"} (0..31)),
- qw (space exclam quotedbl numbersign dollar percent ampersand
- quoteright parenleft parenright asterisk plus comma minus
- period slash zero one two three four five six seven eight
- nine colon semicolon less equal greater question at A B C D E
- F G H I J K L M N O P Q R S T U V W X Y Z bracketleft
- backslash bracketright asciicircum underscore quoteleft a b c
- d e f g h i j k l m n o p q r s t u v w x y z braceleft bar
- braceright asciitilde),
- (map {"_a$_"} (128..143)),
- qw (dotlessi grave acute circumflex tilde macron breve dotaccent
- dieresis _a153 ring cedilla _a156 hungarumlaut ogonek
- caron space exclamdown cent sterling currency yen brokenbar
- section dieresis copyright ordfeminine guillemotleft
- logicalnot hyphen registered macron degree plusminus
- twosuperior threesuperior acute mu paragraph periodcentered
- cedilla onesuperior ordmasculine guillemotright onequarter
- onehalf threequarters questiondown Agrave Aacute Acircumflex
- Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex
- Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde
- Ograve Oacute Ocircumflex Otilde Odieresis multiply Oslash
- Ugrave Uacute Ucircumflex Udieresis Yacute Thorn germandbls
- agrave aacute acircumflex atilde adieresis aring ae ccedilla
- egrave eacute ecircumflex edieresis igrave iacute icircumflex
- idieresis eth ntilde ograve oacute ocircumflex otilde
- odieresis divide oslash ugrave uacute ucircumflex udieresis
- yacute thorn ydieresis));
-my @ot1encoding =
- qw (Gamma Delta Theta Lambda Xi Pi Sigma Upsilon Phi
- Psi Omega ff fi fl ffi ffl dotlessi dotlessj grave acute caron
- breve macron ring cedilla germandbls ae oe oslash AE OE Oslash
- suppress exclam quotedblright numbersign dollar percent
- ampersand quoteright parenleft parenright asterisk plus comma
- hyphen period slash zero one two three four five six seven
- eight nine colon semicolon exclamdown equal questiondown
- question at A B C D E F G H I J K L M N O P Q R S T U V W X Y
- Z bracketleft quotedblleft bracketright circumflex dotaccent
- quoteleft a b c d e f g h i j k l m n o p q r s t u v w x y z
- endash emdash hungarumlaut tilde dieresis);
-my @t1encoding =
- qw (grave acute circumflex tilde dieresis hungarumlaut ring caron
- breve macron dotaccent cedilla ogonek quotesinglbase
- guilsinglleft guilsinglright quotedblleft quotedblright
- quotedblbase guillemotleft guillemotright endash emdash cwm
- perthousand dotlessi dotlessj ff fi fl ffi ffl space exclam
- quotedbl numbersign dollar percent ampersand quoteright
- parenleft parenright asterisk plus comma hyphen period slash
- zero one two three four five six seven eight nine colon
- semicolon less equal greater question at A B C D E F G H I J K L
- M N O P Q R S T U V W X Y Z bracketleft backslash bracketright
- asciicircum underscore quoteleft a b c d e f g h i j k l m n o p
- q r s t u v w x y z braceleft bar braceright asciitilde
- sfthyphen Abreve Aogonek Cacute Ccaron Dcaron Ecaron Eogonek
- Gbreve Lacute Lcaron Lslash Nacute Ncaron Eng Ohungarumlaut
- Racute Rcaron Sacute Scaron Scedilla Tcaron Tcedilla
- Uhungarumlaut Uring Ydieresis Zacute Zcaron Zdotaccent IJ
- Idotaccent dcroat section abreve aogonek cacute ccaron dcaron
- ecaron eogonek gbreve lacute lcaron lslash nacute ncaron eng
- ohungarumlaut racute rcaron sacute scaron scedilla tcaron
- tcedilla uhungarumlaut uring ydieresis zacute zcaron zdotaccent
- ij exclamdown questiondown sterling Agrave Aacute Acircumflex
- Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex
- Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde Ograve
- Oacute Ocircumflex Otilde Odieresis OE Oslash Ugrave Uacute
- Ucircumflex Udieresis Yacute Thorn SS agrave aacute acircumflex
- atilde adieresis aring ae ccedilla egrave eacute ecircumflex
- edieresis igrave iacute icircumflex idieresis eth ntilde ograve
- oacute ocircumflex otilde odieresis oe oslash ugrave uacute
- ucircumflex udieresis yacute thorn germandbls);
-
-# Define font parameters that the user can override.
-my $fontversion;
-my $creationdate;
-my $comment;
-my $familyname;
-my $weight;
-my $fullname;
-my $fixedpitch;
-my $italicangle;
-my $underlinepos;
-my $underlinethick;
-my $fontname;
-my $uniqueID;
-my $designsize;
-my ($mffile, $pt1file, $pfbfile, $ffscript);
-my $encoding;
-my $rounding;
-my $bpppix;
-
-# Define all of our other global variables.
-my $progname = basename $0, ".pl";
-my $mag;
-my @fontbbox;
-my @charbbox;
-my @charwd;
-my @glyphname;
-my @charfiles;
-my $filebase;
-my $filedir;
-my $filenoext;
-my $versionmsg = "mf2pt1 version $VERSION
-
-Copyright (C) 2008 Scott Pakin
-
-This program may be distributed and/or modified under the conditions
-of the LaTeX Project Public License, either version 1.3c of this
-license or (at your option) any later version.
-
-The latest version of this license is in:
-
- http://www.latex-project.org/lppl.txt
-
-and version 1.3c or later is part of all distributions of LaTeX
-version 2006/05/20 or later.
-";
-
-
-######################################################################
-
-# The routines to compute the fractional approximation of a real number
-# are heavily based on code posted by Ben Tilly
-# <http://www.perlmonks.org/?node_id=26179> on Nov 16th, 2000, to the
-# PerlMonks list. See <http://www.perlmonks.org/index.pl?node_id=41961>.
-
-
-# Takes numerator/denominator pairs.
-# Returns a PS fraction string representation (with a trailing space).
-sub frac_string (@)
-{
- my $res = "";
-
- while (@_) {
- my $n = shift;
- my $d = shift;
- $res .= $n . " ";
- $res .= $d . " div " if $d > 1;
- }
-
- return $res;
-}
-
-
-# Takes a number.
-# Returns a numerator and denominator with the smallest denominator
-# so that the difference of the resulting fraction to the number is
-# smaller or equal to $rounding.
-sub frac_approx ($)
-{
- my $num = shift;
- my $f = ret_frac_iter ($num);
-
- while (1) {
- my ($n, $m) = $f->();
- my $approx = $n / $m;
- my $delta = abs ($num - $approx);
- return ($n, $m) if ($delta <= $rounding);
- }
-}
-
-
-# Takes a number, returns the best integer approximation and (in list
-# context) the error.
-sub best_int ($)
-{
- my $x = shift;
- my $approx = sprintf '%.0f', $x;
- if (wantarray) {
- return ($approx, $x - $approx);
- }
- else {
- return $approx;
- }
-}
-
-
-# Takes a numerator and denominator, in scalar context returns
-# the best fraction describing them, in list the numerator and
-# denominator.
-sub frac_standard ($$)
-{
- my $n = best_int(shift);
- my $m = best_int(shift);
- my $k = gcd($n, $m);
- $n /= $k;
- $m /= $k;
- if ($m < 0) {
- $n *= -1;
- $m *= -1;
- }
- if (wantarray) {
- return ($n, $m);
- }
- else {
- return "$n/$m";
- }
-}
-
-
-# Euclidean algorithm for calculating a GCD.
-# Takes two integers, returns the greatest common divisor.
-sub gcd ($$)
-{
- my ($n, $m) = @_;
- while ($m) {
- my $k = $n % $m;
- ($n, $m) = ($m, $k);
- }
- return $n;
-}
-
-
-# Takes a list of terms in a continued fraction, and converts it
-# into a fraction.
-sub ints_to_frac (@)
-{
- my ($n, $m) = (0, 1); # Start with 0
- while (@_) {
- my $k = pop;
- if ($n) {
- # Want frac for $k + 1/($n/$m)
- ($n, $m) = frac_standard($k*$n + $m, $n);
- }
- else {
- # Want $k
- ($n, $m) = frac_standard($k, 1);
- }
- }
- return frac_standard($n, $m);
-}
-
-
-# Takes a number, returns an anon sub which iterates through a set of
-# fractional approximations that converges very quickly to the number.
-sub ret_frac_iter ($)
-{
- my $x = shift;
- my $term_iter = ret_next_term_iter($x);
- my @ints;
- return sub {
- push @ints, $term_iter->();
- return ints_to_frac(@ints);
- }
-}
-
-
-# Terms of a continued fraction converging on that number.
-sub ret_next_term_iter ($)
-{
- my $x = shift;
- return sub {
- (my $n, $x) = best_int($x);
- if (0 != $x) {
- $x = 1/$x;
- }
- return $n;
- }
-}
-
-######################################################################
-
-# Round a number to the nearest integer.
-sub round ($)
-{
- return int($_[0] + 0.5*($_[0] <=> 0));
-}
-
-
-# Round a number to a given precision.
-sub prec ($)
-{
- return round ($_[0] / $rounding) * $rounding;
-}
-
-
-# Set a variable's value to the first defined value in the given list.
-# If the variable was not previously defined and no value in the list
-# is defined, do nothing.
-sub assign_default (\$@)
-{
- my $varptr = shift; # Pointer to variable to define
- return if defined $$varptr && $$varptr ne "UNSPECIFIED";
- foreach my $val (@_) {
- next if !defined $val;
- $$varptr = $val;
- return;
- }
-}
-
-
-# Print and execute a shell command. An environment variable with the
-# same name as the command overrides the command name. Return 1 on
-# success, 0 on failure. Optionally abort if the command fails, based
-# on the first argument to execute_command.
-sub execute_command ($@)
-{
- my $abort_on_failure = shift;
- my @command = @_;
- $command[0] = $ENV{uc $command[0]} || $command[0];
- my $prettyargs = join (" ", map {/[\\ ]/ ? "'$_'" : $_} @command);
- print "Invoking \"$prettyargs\"...\n";
- my $result = system @command;
- die "${progname}: \"$prettyargs\" failed ($!)\n" if $result && $abort_on_failure;
- return !$result;
-}
-
-
-# Output the font header.
-sub output_header ()
-{
- # Show the initial boilerplate.
- print OUTFILE <<"ENDHEADER";
-%!FontType1-1.0: $fontname $fontversion
-%%CreationDate: $creationdate
-% Font converted to Type 1 by mf2pt1, written by Scott Pakin.
-11 dict begin
-/FontInfo 11 dict dup begin
-/version ($fontversion) readonly def
-/Notice ($comment) readonly def
-/FullName ($fullname) readonly def
-/FamilyName ($familyname) readonly def
-/Weight ($weight) readonly def
-/ItalicAngle $italicangle def
-/isFixedPitch $fixedpitch def
-/UnderlinePosition $underlinepos def
-/UnderlineThickness $underlinethick def
-end readonly def
-/FontName /$fontname def
-ENDHEADER
-
- # If we're not using an encoding that PostScript knows about, then
- # create an encoding vector.
- if ($encoding==\@standardencoding) {
- print OUTFILE "/Encoding StandardEncoding def\n";
- }
- else {
- print OUTFILE "/Encoding 256 array\n";
- print OUTFILE "0 1 255 {1 index exch /.notdef put} for\n";
- foreach my $charnum (0 .. $#{$encoding}) {
- if ($encoding->[$charnum] && $encoding->[$charnum]!~/^_a\d+$/) {
- print OUTFILE "dup $charnum /$encoding->[$charnum] put\n";
- }
- }
- print OUTFILE "readonly def\n";
- }
-
- # Show the final boilerplate.
- print OUTFILE <<"ENDHEADER";
-/PaintType 0 def
-/FontType 1 def
-/FontMatrix [0.001 0 0 0.001 0 0] readonly def
-/UniqueID $uniqueID def
-/FontBBox{@fontbbox}readonly def
-currentdict end
-currentfile eexec
-dup /Private 5 dict dup begin
-/RD{string currentfile exch readstring pop}executeonly def
-/ND{noaccess def}executeonly def
-/NP{noaccess put}executeonly def
-ENDHEADER
-}
-
-
-# Use MetaPost to generate one PostScript file per character. We
-# calculate the font bounding box from these characters and store them
-# in @fontbbox. If the input parameter is 1, set other font
-# parameters, too.
-sub get_bboxes ($)
-{
- execute_command 1, ("mpost", "-mem=mf2pt1", "-progname=mpost",
- "\\mode:=localfont; mag:=$mag; bpppix $bpppix; input $mffile");
- opendir (CURDIR, ".") || die "${progname}: $! ($filedir)\n";
- @charfiles = sort
- { ($a=~ /\.(\d+)$/)[0] <=> ($b=~ /\.(\d+)$/)[0] }
- grep /^$filebase.*\.\d+$/, readdir(CURDIR);
- close CURDIR;
- @fontbbox = (1000000, 1000000, -1000000, -1000000);
- foreach my $psfile (@charfiles) {
- # Read the character number from the output file's extension.
- $psfile =~ /\.(\d+)$/;
- my $charnum = $1;
-
- # Process in turn each line of the current PostScript file.
- my $havebbox = 0;
- open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n";
- while (<PSFILE>) {
- my @tokens = split " ";
- if ($tokens[0] eq "%%BoundingBox:") {
- # Store the MetaPost-produced bounding box, just in case
- # the given font doesn't use beginchar.
- @tokens = ("%", "MF2PT1:", "glyph_dimensions", @tokens[1..4]);
- $havebbox--;
- }
- next if $#tokens<1 || $tokens[1] ne "MF2PT1:";
-
- # Process a "special" inserted into the generated PostScript.
- MF2PT1_CMD:
- {
- # glyph_dimensions llx lly urx ury -- specified glyph dimensions
- $tokens[2] eq "glyph_dimensions" && do {
- my @bbox = @tokens[3..6];
- $fontbbox[0]=$bbox[0] if $bbox[0]<$fontbbox[0];
- $fontbbox[1]=$bbox[1] if $bbox[1]<$fontbbox[1];
- $fontbbox[2]=$bbox[2] if $bbox[2]>$fontbbox[2];
- $fontbbox[3]=$bbox[3] if $bbox[3]>$fontbbox[3];
- $charbbox[$charnum] = \@bbox;
- $havebbox++;
- last MF2PT1_CMD;
- };
-
- # If all we want is the bounding box, exit the loop now.
- last MF2PT1_CMD if !$_[0];
-
- # glyph_name name -- glyph name
- $tokens[2] eq "glyph_name" && do {
- $glyphname[$charnum] = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # charwd wd -- character width as in TFM
- $tokens[2] eq "charwd" && do {
- $charwd[$charnum] = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_identifier name -- full font name
- $tokens[2] eq "font_identifier" && do {
- $fullname = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_size number -- font design size (pt, not bp)
- $tokens[2] eq "font_size" && $tokens[3] && do {
- $designsize = $tokens[3] * 72 / 72.27;
- last MF2PT1_CMD;
- };
-
- # font_slant number -- italic amount
- $tokens[2] eq "font_slant" && do {
- $italicangle = 0 + rad2deg (atan(-$tokens[3]));
- last MF2PT1_CMD;
- };
-
- # font_coding_scheme string -- font encoding
- $tokens[2] eq "font_coding_scheme" && do {
- $encoding = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_version string -- font version number (xxx.yyy)
- $tokens[2] eq "font_version" && do {
- $fontversion = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_comment string -- font comment notice
- $tokens[2] eq "font_comment" && do {
- $comment = join (" ", @tokens[3..$#tokens]);
- last MF2PT1_CMD;
- };
-
- # font_family string -- font family name
- $tokens[2] eq "font_family" && do {
- $familyname = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_weight string -- font weight (e.g., "Book" or "Heavy")
- $tokens[2] eq "font_weight" && do {
- $weight = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_fixed_pitch number -- fixed width font (0=false, 1=true)
- $tokens[2] eq "font_fixed_pitch" && do {
- $fixedpitch = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_underline_position number -- vertical underline position
- $tokens[2] eq "font_underline_position" && do {
- # We store $underlinepos in points and later
- # scale it by 1000/$designsize.
- $underlinepos = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_underline_thickness number -- thickness of underline
- $tokens[2] eq "font_underline_thickness" && do {
- # We store $underlinethick in points and later
- # scale it by 1000/$designsize.
- $underlinethick = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_name string -- font name
- $tokens[2] eq "font_name" && do {
- $fontname = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_unique_id number (as string) -- globally unique font ID
- $tokens[2] eq "font_unique_id" && do {
- $uniqueID = 0+$tokens[3];
- last MF2PT1_CMD;
- };
- }
- }
- close PSFILE;
- if (!$havebbox) {
- warn "${progname}: No beginchar in character $charnum; glyph dimensions are probably incorrect\n";
- }
- }
-}
-
-
-# Convert ordinary, MetaPost-produced PostScript files into Type 1
-# font programs.
-sub output_font_programs ()
-{
- # Iterate over all the characters. We convert each one, line by
- # line and token by token.
- print "Converting PostScript graphics to Type 1 font programs...\n";
- foreach my $psfile (@charfiles) {
- # Initialize the font program.
- $psfile =~ /\.(\d+)$/;
- my $charnum = $1;
- my $gname = $glyphname[$charnum] || $encoding->[$charnum];
- my @fontprog;
- push @fontprog, ("/$gname {",
- frac_string (frac_approx ($charbbox[$charnum]->[0]),
- frac_approx ($charwd[$charnum] * $mag))
- . "hsbw");
- my ($cpx, $cpy) =
- ($charbbox[$charnum]->[0], 0); # Current point (PostScript)
-
- # Iterate over every line in the current file.
- open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n";
- while (my $oneline=<PSFILE>) {
- next if $oneline=~/^\%/;
- next if $oneline=~/set/; # Fortunately, "set" never occurs on "good" lines.
- my @arglist; # Arguments to current PostScript function
-
- # Iterate over every token in the current line.
- TOKENLOOP:
- foreach my $token (split " ", $oneline) {
- # Number: Round and push on the argument list.
- $token =~ /^[-.\d]+$/ && do {
- push @arglist, prec ($&);
- next TOKENLOOP;
- };
-
- # curveto: Convert to vhcurveto, hvcurveto, or rrcurveto.
- $token eq "curveto" && do {
- my ($dx1, $dy1) = ($arglist[0] - $cpx,
- $arglist[1] - $cpy);
- my ($dx1n, $dx1d) = frac_approx ($dx1);
- my ($dy1n, $dy1d) = frac_approx ($dy1);
- $cpx += $dx1n / $dx1d;
- $cpy += $dy1n / $dy1d;
-
- my ($dx2, $dy2) = ($arglist[2] - $cpx,
- $arglist[3] - $cpy);
- my ($dx2n, $dx2d) = frac_approx ($dx2);
- my ($dy2n, $dy2d) = frac_approx ($dy2);
- $cpx += $dx2n / $dx2d;
- $cpy += $dy2n / $dy2d;
-
- my ($dx3, $dy3) = ($arglist[4] - $cpx,
- $arglist[5] - $cpy);
- my ($dx3n, $dx3d) = frac_approx ($dx3);
- my ($dy3n, $dy3d) = frac_approx ($dy3);
- $cpx += $dx3n / $dx3d;
- $cpy += $dy3n / $dy3d;
-
- if (!$dx1n && !$dy3n) {
- push @fontprog, frac_string ($dy1n, $dy1d,
- $dx2n, $dx2d,
- $dy2n, $dy2d,
- $dx3n, $dx3d)
- . "vhcurveto";
- }
- elsif (!$dy1n && !$dx3n) {
- push @fontprog, frac_string ($dx1n, $dx1d,
- $dx2n, $dx2d,
- $dy2n, $dy2d,
- $dy3n, $dy3d)
- . "hvcurveto";
- }
- else {
- push @fontprog, frac_string ($dx1n, $dx1d,
- $dy1n, $dy1d,
- $dx2n, $dx2d,
- $dy2n, $dy2d,
- $dx3n, $dx3d,
- $dy3n, $dy3d)
- . "rrcurveto";
- }
- next TOKENLOOP;
- };
-
- # lineto: Convert to vlineto, hlineto, or rlineto.
- $token eq "lineto" && do {
- my ($dx, $dy) = ($arglist[0] - $cpx,
- $arglist[1] - $cpy);
- my ($dxn, $dxd) = frac_approx ($dx);
- my ($dyn, $dyd) = frac_approx ($dy);
- $cpx += $dxn / $dxd;
- $cpy += $dyn / $dyd;
-
- if (!$dxn) {
- push @fontprog, frac_string ($dyn, $dyd)
- . "vlineto" if $dyn;
- }
- elsif (!$dyn) {
- push @fontprog, frac_string ($dxn, $dxd)
- . "hlineto";
- }
- else {
- push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd)
- . "rlineto";
- }
- next TOKENLOOP;
- };
-
- # moveto: Convert to vmoveto, hmoveto, or rmoveto.
- $token eq "moveto" && do {
- my ($dx, $dy) = ($arglist[0] - $cpx,
- $arglist[1] - $cpy);
- my ($dxn, $dxd) = frac_approx ($dx);
- my ($dyn, $dyd) = frac_approx ($dy);
- $cpx += $dxn / $dxd;
- $cpy += $dyn / $dyd;
-
- if (!$dxn) {
- push @fontprog, frac_string ($dyn, $dyd)
- . "vmoveto";
- }
- elsif (!$dyn) {
- push @fontprog, frac_string ($dxn, $dxd)
- . "hmoveto";
- }
- else {
- push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd)
- . "rmoveto";
- }
- next TOKENLOOP;
- };
-
- # closepath: Output as is.
- $token eq "closepath" && do {
- push @fontprog, $token;
- next TOKENLOOP;
- };
- }
- }
- close PSFILE;
- push @fontprog, ("endchar",
- "} ND");
- print OUTFILE join ("\n\t", @fontprog), "\n";
- }
-}
-
-
-# Output the final set of code for the Type 1 font.
-sub output_trailer ()
-{
- print OUTFILE <<"ENDTRAILER";
-/.notdef {
- 0 @{[$fontbbox[2]-$fontbbox[0]]} hsbw
- endchar
- } ND
-end
-end
-readonly put
-noaccess put
-dup/FontName get exch definefont pop
-mark currentfile closefile
-cleartomark
-ENDTRAILER
-}
-
-######################################################################
-
-# Parse the command line. Asterisks in the following represents
-# commands also defined by Plain Metafont.
-my %opthash = ();
-GetOptions (\%opthash,
- "fontversion=s", # font_version
- "comment=s", # font_comment
- "family=s", # font_family
- "weight=s", # font_weight
- "fullname=s", # font_identifier (*)
- "fixedpitch!", # font_fixed_pitch
- "italicangle=f", # font_slant (*)
- "underpos=f", # font_underline_position
- "underthick=f", # font_underline_thickness
- "name=s", # font_name
- "uniqueid=i", # font_unique_id
- "designsize=f", # font_size (*)
- "encoding=s", # font_coding_scheme (*)
- "rounding=f",
- "bpppix=f",
- "ffscript=s",
- "h|help",
- "V|version") || pod2usage(2);
-if (defined $opthash{"h"}) {
- pod2usage(-verbose => 1,
- -output => \*STDOUT, # Bug workaround for Pod::Usage
- -exitval => "NOEXIT");
- print "Please e-mail bug reports to scott+mf\@pakin.org.\n";
- exit 1;
-}
-do {print $versionmsg; exit 1} if defined $opthash{"V"};
-pod2usage(2) if $#ARGV != 0;
-
-# Extract the filename from the command line.
-$mffile = $ARGV[0];
-my @fileparts = fileparse $mffile, ".mf";
-$filebase = $fileparts[0];
-$filedir = $fileparts[1];
-$filenoext = File::Spec->catfile ($filedir, $filebase);
-$pt1file = $filebase . ".pt1";
-$pfbfile = $filebase . ".pfb";
-
-assign_default $bpppix, $opthash{bpppix}, 0.02;
-
-# Make our first pass through the input, to set values for various options.
-$mag = 100; # Get a more precise bounding box.
-get_bboxes(1); # This might set $designsize.
-
-# Sanity-check the specified precision.
-assign_default $rounding, $opthash{rounding}, 1;
-if ($rounding<=0.0 || $rounding>1.0) {
- die sprintf "%s: Invalid rounding amount \"%g\"; value must be a positive number no greater than 1.0\n", $progname, $rounding;
-}
-
-# Ensure that every user-definable parameter is assigned a value.
-assign_default $fontversion, $opthash{fontversion}, "001.000";
-assign_default $creationdate, scalar localtime;
-assign_default $comment, $opthash{comment}, "Font converted to Type 1 by mf2pt1, written by Scott Pakin.";
-assign_default $weight, $opthash{weight}, "Medium";
-assign_default $fixedpitch, $opthash{fixedpitch}, 0;
-assign_default $uniqueID, $opthash{uniqueid}, int(rand(1000000)) + 4000000;
-assign_default $designsize, $opthash{designsize};
-die "${progname}: a design size must be specified in $mffile or on the command line\n" if !defined $designsize;
-die "${progname}: the design size must be a positive number\n" if $designsize<=0.0;
-assign_default $underlinepos, $opthash{underpos}, -1;
-$underlinepos = round(1000*$underlinepos/$designsize);
-assign_default $underlinethick, $opthash{underthick}, 0.5;
-$underlinethick = round(1000*$underlinethick/$designsize);
-assign_default $fullname, $opthash{fullname}, $filebase;
-assign_default $familyname, $opthash{family}, $fullname;
-assign_default $italicangle, $opthash{italicangle}, 0;
-assign_default $fontname, $opthash{name}, "$familyname-$weight";
-$fontname =~ s/\s//g;
-assign_default $encoding, $opthash{encoding}, "standard";
-my $encoding_name = $encoding;
-ENCODING:
-{
- if (-e $encoding) {
- # Filenames take precedence over built-in encodings.
- my @enc_array;
- open (ENCFILE, "<$encoding") || die "${progname}: $! ($encoding)\n";
- while (my $oneline = <ENCFILE>) {
- $oneline =~ s/\%.*$//;
- foreach my $word (split " ", $oneline) {
- push @enc_array, substr($word, 1) if substr($word, 0, 1) eq "/";
- }
- }
- close ENCFILE;
- $encoding_name = substr (shift @enc_array, 1);
- $encoding = \@enc_array;
- last ENCODING;
- }
- $encoding=\@standardencoding, last ENCODING if $encoding eq "standard";
- $encoding=\@isolatin1encoding, last ENCODING if $encoding eq "isolatin1";
- $encoding=\@ot1encoding, last ENCODING if $encoding eq "ot1";
- $encoding=\@t1encoding, last ENCODING if $encoding eq "t1";
- $encoding=\@glyphname, last ENCODING if $encoding eq "asis";
- warn "${progname}: Unknown encoding \"$encoding\"; using standard Adobe encoding\n";
- $encoding=\@standardencoding; # Default to standard encoding
-}
-assign_default $fixedpitch, $opthash{fixedpitch}, 0;
-$fixedpitch = $fixedpitch ? "true" : "false";
-assign_default $ffscript, $opthash{ffscript};
-
-# Output the final values of all of our parameters.
-print "\n";
-print <<"PARAMVALUES";
-mf2pt1 is using the following font parameters:
- font_version: $fontversion
- font_comment: $comment
- font_family: $familyname
- font_weight: $weight
- font_identifier: $fullname
- font_fixed_pitch: $fixedpitch
- font_slant: $italicangle
- font_underline_position: $underlinepos
- font_underline_thickness: $underlinethick
- font_name: $fontname
- font_unique_id: $uniqueID
- font_size: $designsize (bp)
- font_coding_scheme: $encoding_name
-PARAMVALUES
- ;
-print "\n";
-
-# Scale by a factor of 1000/design size.
-$mag = 1000.0 / $designsize;
-get_bboxes(0);
-print "\n";
-
-# Output the font in disassembled format.
-open (OUTFILE, ">$pt1file") || die "${progname}: $! ($pt1file)\n";
-output_header();
-printf OUTFILE "2 index /CharStrings %d dict dup begin\n",
- 1+scalar(grep {defined($_)} @charbbox);
-output_font_programs();
-output_trailer();
-close OUTFILE;
-unlink @charfiles;
-print "\n";
-
-# Convert from the disassembled font format to Type 1 binary format.
-if (!execute_command 0, ("t1asm", $pt1file, $pfbfile)) {
- die "${progname}: You'll need either to install t1utils and rerun $progname or find another way to convert $pt1file to $pfbfile\n";
- exit 1;
-}
-print "\n";
-unlink $pt1file;
-
-# Use FontForge to autohint the result.
-my $user_script = 0; # 1=script file was provided by the user; 0=created here
-if (defined $ffscript) {
- # The user provided his own script.
- $user_script = 1;
-}
-else {
- # Create a FontForge script file.
- $ffscript = $filebase . ".pe";
- open (FFSCRIPT, ">$ffscript") || die "${progname}: $! ($ffscript)\n";
- print FFSCRIPT <<'AUTOHINT';
-Open($1);
-SelectAll();
-RemoveOverlap();
-AddExtrema();
-Simplify(0, 2);
-CorrectDirection();
-Simplify(0, 2);
-RoundToInt();
-AutoHint();
-Generate($1);
-Quit(0);
-AUTOHINT
- ;
- close FFSCRIPT;
-}
-if (!execute_command 0, ("fontforge", "-script", $ffscript, $pfbfile)) {
- warn "${progname}: You'll need to install FontForge if you want $pfbfile autohinted (not required, but strongly recommended)\n";
-}
-unlink $ffscript if !$user_script;
-print "\n";
-
-# Finish up.
-print "*** Successfully generated $pfbfile! ***\n";
-exit 0;
-
-######################################################################
-
-__END__
-
-=head1 NAME
-
-mf2pt1 - produce a PostScript Type 1 font program from a Metafont source
-
-
-=head1 SYNOPSIS
-
-mf2pt1
-[B<--help>]
-[B<--version>]
-[B<--comment>=I<string>]
-[B<--designsize>=I<number>]
-[B<--encoding>=I<encoding>]
-[B<--family>=I<name>]
-[B<-->[B<no>]B<fixedpitch>]
-[B<--fontversion>=I<MMM.mmm>]
-[B<--fullname>=I<name>]
-[B<--italicangle>=I<number>]
-[B<--name>=I<name>]
-[B<--underpos>=I<number>]
-[B<--underthick>=I<number>]
-[B<--uniqueid>=I<number>]
-[B<--weight>=I<weight>]
-[B<--rounding>=I<number>]
-[B<--bpppix>=I<number>]
-[B<--ffscript>=I<file.pe>]
-I<infile>.mf
-
-
-=head1 WARNING
-
-The B<mf2pt1> Info file is the main source of documentation for
-B<mf2pt1>. This man page is merely a brief summary.
-
-
-=head1 DESCRIPTION
-
-B<mf2pt1> facilitates producing PostScript Type 1 fonts from a
-Metafont source file. It is I<not>, as the name may imply, an
-automatic converter of arbitrary Metafont fonts to Type 1 format.
-B<mf2pt1> imposes a number of restrictions on the Metafont input. If
-these restrictions are met, B<mf2pt1> will produce valid Type 1
-output. (Actually, it produces "disassembled" Type 1; the B<t1asm>
-program from the B<t1utils> suite will convert this to a true Type 1
-font.)
-
-=head2 Usage
-
- mf2pt1 myfont.mf
-
-=head1 OPTIONS
-
-Font parameters are best specified within a Metafont program. If
-necessary, though, command-line options can override any of these
-parameters. The B<mf2pt1> Info page, the primary source of B<mf2pt1>
-documentation, describes the following in greater detail.
-
-=over 4
-
-=item B<--help>
-
-Provide help on B<mf2pt1>'s command-line options.
-
-=item B<--version>
-
-Output the B<mf2pt1> version number, copyright, and license.
-
-=item B<--comment>=I<string>
-
-Include a font comment, usually a copyright notice.
-
-=item B<--designsize>=I<number>
-
-Specify the font design size in points.
-
-=item B<--encoding>=I<encoding>
-
-Designate the font encoding, either the name of a---typically
-F<.enc>---file which contains a PostScript font-encoding vector or one
-of C<standard> (the default), C<ot1>, C<t1>, or C<isolatin1>.
-
-=item B<--family>=I<name>
-
-Specify the font family.
-
-=item B<--fixedpitch>, B<--nofixedpitch>
-
-Assert that the font uses either monospaced (B<--fixedpitch>) or
-proportional (B<--nofixedpitch>) character widths.
-
-=item B<--fontversion>=I<MMM.mmm>
-
-Specify the font's major and minor version number.
-
-=item B<--fullname>=I<name>
-
-Designate the full font name (family plus modifiers).
-
-=item B<--italicangle>=I<number>
-
-Designate the italic angle in degrees counterclockwise from vertical.
-
-=item B<--name>=I<name>
-
-Provide the font name.
-
-=item B<--underpos>=I<number>
-
-Specify the vertical position of the underline in thousandths of the
-font height.
-
-=item B<--underthick>=I<number>
-
-Specify the thickness of the underline in thousandths of the font
-height.
-
-=item B<--uniqueid>=I<number>
-
-Specify a globally unique font identifier.
-
-=item B<--weight>=I<weight>
-
-Provide a description of the font weight (e.g., ``Heavy'').
-
-=item B<--rounding>=I<number>
-
-Specify the fraction of a font unit (0.0 < I<number> <= 1.0) to which
-to round coordinate values [default: 1.0].
-
-=item B<--bpppix>=I<number>
-
-Redefine the number of big points per pixel from 0.02 to I<number>.
-
-=item B<--ffscript>=I<file.pe>
-
-Name a script to pass to FontForge.
-
-=back
-
-
-=head1 FILES
-
-F<mf2pt1.mem> (which is generated from F<mf2pt1.mp> and F<mfplain.mp>)
-
-
-=head1 NOTES
-
-As stated in L</"WARNING">, the complete source of documentation for
-B<mf2pt1> is the Info page, not this man page.
-
-
-=head1 SEE ALSO
-
-mf(1), mpost(1), t1asm(1), fontforge(1)
-
-
-=head1 AUTHOR
-
-Scott Pakin, I<scott+mf@pakin.org>
+++ /dev/null
-#!@PYTHON@
-
-import re
-import os
-
-def new_link_path (link, dir, r):
- l = link.split ('/')
- d = dir.split ('/')
- i = 0
- while i < len(d) and i < len(l) and l[i] == '..':
- if r.match (d[i]):
- del l[i]
- else:
- i += 1
- return '/'.join ([x for x in l if not r.match (x)])
-
-def walk_tree (tree_roots = [],
- process_dirs = '.*',
- exclude_dirs = '',
- find_files = '.*',
- exclude_files = ''):
- """Walk directory trees and.returns (dirs, symlinks, files, extra_files) tuple.
-
- Arguments:
- tree_roots=DIRLIST use DIRLIST as tree roots list
- process_dir=PATTERN only process files in directories named PATTERN
- exclude_dir=PATTERN don't recurse into directories named PATTERN
- find_files=PATTERN filters files which are hardlinked
- exclude_files=PATTERN exclude files named PATTERN
- """
- find_files_re = re.compile (find_files)
- exclude_dirs_re = re.compile (exclude_dirs)
- exclude_files_re = re.compile (exclude_files)
- process_dirs_re = re.compile (process_dirs)
-
- dirs_paths = []
- symlinks_paths = []
- files_paths = []
-
- for d in tree_roots:
- for current_dir, dirs, files in os.walk(d):
- i = 0
- while i < len(dirs):
- if exclude_dirs_re.search (os.path.join (current_dir, dirs[i])):
- del dirs[i]
- else:
- p = os.path.join (current_dir, dirs[i])
- if os.path.islink (p):
- symlinks_paths.append (p)
- i += 1
- if not process_dirs_re.search (current_dir):
- continue
- dirs_paths.append (current_dir)
- for f in files:
- if exclude_files_re.match (f):
- continue
- p = os.path.join (current_dir, f)
- if os.path.islink (p):
- symlinks_paths.append (p)
- elif find_files_re.match (f):
- files_paths.append (p)
- return (dirs_paths, symlinks_paths, files_paths)
+++ /dev/null
-#!/usr/bin/env python
-
-notes = "CDEFGAB"
-alterations = [-1, 0, 1]
-
-def print_note (octave, note, alteration):
- print " <note>\n <pitch>\n <step>%s</step>" % notes[note]
- if alteration <> 0:
- print " <alter>%s</alter>" % alteration
- print " <octave>%s</octave>\n </pitch>\n <duration>1</duration>\n <voice>1</voice>\n <type>quarter</type>\n </note>" % octave
-
-
-print """<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
- "http://www.musicxml.org/dtds/partwise.dtd">
-<score-partwise>
- <movement-title>Various piches and interval sizes</movement-title>
- <part-list>
- <score-part id="P1">
- <part-name>MusicXML Part</part-name>
- </score-part>
- </part-list>
- <!--=========================================================-->
- <part id="P1">
- <measure number="1">
- <attributes>
- <divisions>1</divisions>
- <key>
- <fifths>0</fifths>
- <mode>major</mode>
- </key>
- <time symbol="common">
- <beats>2</beats>
- <beat-type>4</beat-type>
- </time>
- <clef>
- <sign>G</sign>
- <line>2</line>
- </clef>
- </attributes>
-"""
-
-start_octave = 5
-
-for octave in (start_octave, start_octave+1):
- for note in (0,1,2,3,4,5,6):
- for alteration in alterations:
- if octave == start_octave and note == 0 and alteration == -1:
- continue
- print_note (octave, note, alteration)
-# if octave == start_octave and note == 0 and alteration == 0:
-# continue
- print_note (start_octave-(octave-start_octave)-(1-(7-note)/7), (7-note)%7, -alteration)
-
-print """ </measure>
- </part>
-</score-partwise>
-"""
+++ /dev/null
-#!/usr/bin/env python
-
-notes = "CDEFGAB"
-alterations = [-1, 0, 1]
-
-def print_measure (nr, fifth, mode, atts1 = "", atts = "", final = ""):
- print """ <measure number="%s">
- <attributes>
-%s <key>
- <fifths>%s</fifths>
- <mode>%s</mode>
- </key>
-%s </attributes>
- <note>
- <pitch>
- <step>C</step>
- <octave>4</octave>
- </pitch>
- <duration>2</duration>
- <voice>1</voice>
- <type>half</type>
- </note>
-%s </measure>""" % (nr, atts1, fifth, mode, atts, final)
-
-first_div = """ <divisions>1</divisions>
-"""
-first_atts = """ <time symbol="common">
- <beats>2</beats>
- <beat-type>4</beat-type>
- </time>
- <clef>
- <sign>G</sign>
- <line>2</line>
- </clef>
-"""
-
-final_barline = """ <barline location="right">
- <bar-style>light-heavy</bar-style>
- </barline>
-"""
-
-print """<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
- "http://www.musicxml.org/dtds/partwise.dtd">
-<score-partwise>
- <movement-title>Different Key signatures</movement-title>
- <identification>
- <miscellaneous>
- <miscellaneous-field name="description">Various key signature: from 11
- flats to 11 sharps (each one first one measure in major, then one
- measure in minor)</miscellaneous-field>
- </miscellaneous>
- </identification>
- <part-list>
- <score-part id="P1">
- <part-name>MusicXML Part</part-name>
- </score-part>
- </part-list>
- <!--=========================================================-->
- <part id="P1">"""
-
-max_range = 11
-measure = 0
-for fifth in range(-max_range, max_range+1):
- measure += 1
- if fifth == -max_range:
- print_measure (measure, fifth, "major", first_div, first_atts)
- else:
- print_measure (measure, fifth, "major")
- measure += 1
- if fifth == max_range:
- print_measure (measure, fifth, "minor", "", "", final_barline)
- else:
- print_measure (measure, fifth, "minor")
-
-
-print """ </part>
-</score-partwise>"""
+++ /dev/null
-#!/usr/bin/env python
-
-notes = "CDEFGAB"
-alterations = [-1, 0, 1]
-
-dot_xml = """ <dot/>
-"""
-tie_xml = """ <tie type="%s"/>
-"""
-tie_notation_xml = """ <notations><tied type="%s"/></notations>
-"""
-
-
-def generate_note (duration, end_tie = False):
- if duration < 2:
- (notetype, dur) = ("8th", 1)
- elif duration < 4:
- (notetype, dur) = ("quarter", 2)
- elif duration < 8:
- (notetype, dur) = ("half", 4)
- else:
- (notetype, dur) = ("whole", 8)
- dur_processed = dur
- dot = ""
- if (duration - dur_processed >= dur/2):
- dot = dot_xml
- dur_processed += dur/2
- if (duration - dur_processed >= max(dur/4, 1)):
- dot += dot_xml
- dur_processed += dur/4
- tie = ""
- tie_notation = ""
- if end_tie:
- tie += tie_xml % "stop"
- tie_notation += tie_notation_xml % "stop"
- second_note = None
- if duration - dur_processed > 0:
- second_note = generate_note (duration-dur_processed, True)
- tie += tie_xml % "start"
- tie_notation += tie_notation_xml % "start"
- note = """ <note>
- <pitch>
- <step>C</step>
- <octave>5</octave>
- </pitch>
- <duration>%s</duration>
-%s <voice>1</voice>
- <type>%s</type>
-%s%s </note>""" % (dur_processed, tie, notetype, dot, tie_notation)
- if second_note:
- return "%s\n%s" % (note, second_note)
- else:
- return note
-
-def print_measure (nr, beats, type, params = "", attr = "", attr2 = "", barline = ""):
- duration = 8*beats/type
- note = generate_note (duration)
-
- print """ <measure number="%s">
- <attributes>
-%s <time%s>
- <beats>%s</beats>
- <beat-type>%s</beat-type>
- </time>
-%s </attributes>
-%s
-%s </measure>""" % (nr, attr, params, beats, type, attr2, note, barline)
-
-first_key = """ <divisions>2</divisions>
- <key>
- <fifths>0</fifths>
- <mode>major</mode>
- </key>
-"""
-first_clef = """ <clef>
- <sign>G</sign>
- <line>2</line>
- </clef>
-"""
-
-final_barline = """ <barline location="right">
- <bar-style>light-heavy</bar-style>
- </barline>
-"""
-
-print """<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
- "http://www.musicxml.org/dtds/partwise.dtd">
-<score-partwise>
- <identification>
- <miscellaneous>
- <miscellaneous-field name="description">Various time signatures: 2/2
- (alla breve), 4/4 (C), 2/2, 3/2, 2/4, 3/4, 4/4, 5/4, 3/8, 6/8,
- 12/8</miscellaneous-field>
- </miscellaneous>
- </identification>
- <part-list>
- <score-part id="P1">
- <part-name>MusicXML Part</part-name>
- </score-part>
- </part-list>
- <!--=========================================================-->
- <part id="P1">"""
-
-measure = 1
-
-print_measure (measure, 2, 2, " symbol=\"common\"", first_key, first_clef)
-measure += 1
-
-print_measure (measure, 4, 4, " symbol=\"common\"")
-measure += 1
-
-print_measure (measure, 2, 2)
-measure += 1
-
-print_measure (measure, 3, 2)
-measure += 1
-
-print_measure (measure, 2, 4)
-measure += 1
-
-print_measure (measure, 3, 4)
-measure += 1
-
-print_measure (measure, 4, 4)
-measure += 1
-
-print_measure (measure, 5, 4)
-measure += 1
-
-print_measure (measure, 3, 8)
-measure += 1
-
-print_measure (measure, 6, 8)
-measure += 1
-
-print_measure (measure, 12, 8, "", "", "", final_barline)
-measure += 1
-
-print """ </part>
-</score-partwise>"""
+++ /dev/null
-#!/usr/bin/env python
-# mutopia-index.py
-
-import fnmatch
-import getopt
-import os
-import re
-import stat
-import sys
-
-def find (pat, dir):
- f = os.popen ('find %s -name "%s"'% (dir, pat))
- lst = []
- for a in f.readlines():
- a = a[:-1]
- lst.append (a)
- return lst
-
-
-junk_prefix = 'out-www/'
-
-headertext= r"""
-
-<h1>LilyPond samples</h1>
-
-
-<p>You are looking at a page with some LilyPond samples. These files
-are also included in the distribution. The output is completely
-generated from the source file, without any further touch up.
-
-<p>
-
-The pictures are 90 dpi anti-aliased snapshots of the printed output.
-For a good impression of the quality print out the PDF file.
-"""
-
-headertext_nopics= r"""
-<p>No examples were found in this directory.
-"""
-
-#
-# FIXME breaks on multiple strings.
-#
-def read_lilypond_header (fn):
- s = open (fn).read ()
- s = re.sub ('%.*$', '', s)
- s = re.sub ('\n', ' ', s)
-
- dict = {}
- m = re.search (r"""\\header\s*{([^}]*)}""", s)
-
- if m:
- s = m.group (1)
- else:
- return dict
-
- while s:
- m = re.search (r'''\s*(\S+)\s*=\s*"([^"]+)"''', s)
- if m == None:
- s = ''
- else:
- s = s[m.end (0):]
- left = m.group (1)
- right = m.group (2)
-
- left = re.sub ('"', '', left)
- right = re.sub ('"', '', right)
- dict[left] = right
-
- return dict
-
-def help ():
- sys.stdout.write (r'''Usage: mutopia-index [OPTIONS] INFILE OUTFILE
-Generate index for mutopia.
-
-Options:
- -h, --help print this help
- -o, --output=FILE write output to file
- -s, --subdirs=DIR add subdir
- --suffix=SUF specify suffix
-
-''')
- sys.exit (0)
-
-# ugh.
-def gen_list (inputs, file_name):
- sys.stderr.write ("generating HTML list %s" % file_name)
- sys.stderr.write ('\n')
- if file_name:
- list = open (file_name, 'w')
- else:
- list = sys.stdout
- list.write ('''<html><head><title>Rendered Examples</title>
-<style type="text/css">
-hr { border:0; height:1; color: #000000; background-color: #000000; }\n
-</style>
-<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
-</head>''')
-
- list.write ('<body bgcolor=white>\n')
-
- if inputs:
- list.write (headertext)
- else:
- list.write (headertext_nopics)
-
- for ex in inputs:
- print ex
-
- (base, ext) = os.path.splitext (ex)
- (base, ext2) = os.path.splitext (base)
- ext = ext2 + ext
-
- header = read_lilypond_header (ex)
- head = header.get ('title', os.path.basename (base))
- composer = header.get ('composer', '')
- desc = header.get ('description', '')
- list.write ('<hr>\n')
- list.write ('<h1>%s</h1>\n' % head);
- if composer:
- list.write ('<h2>%s</h2>\n' % composer)
- if desc:
- list.write ('%s<p>' % desc)
- list.write ('<ul>\n')
-
- def list_item (file_name, desc, type, lst = list):
- if os.path.isfile (file_name):
- lst.write ('<li><a href="%s">%s</a>'
- % (re.sub (junk_prefix, '', file_name), desc))
-
- # FIXME: include warning if it uses \include
- # files.
-
- size = os.stat (file_name)[stat.ST_SIZE]
- kB = (size + 512) / 1024
- if kB:
- lst.write (' (%s %d kB)' % (type, kB))
- else:
- lst.write (' (%s %d characters)'
- % (type, size))
- pictures = ['jpeg', 'png', 'xpm']
- lst.write ('\n')
- else:
- print "cannot find" , `file_name`
-
- list_item (base + ext, 'The input', 'ASCII')
-
- pages_found = 0
- for page in range (1, 100):
- f = base + '-page%d.png' % page
-
- if not os.path.isfile (f):
- break
- pages_found += 1
- list_item (f, 'See a picture of page %d' % page, 'png')
-
- if pages_found == 0 and os.path.exists (base + '.png'):
- list_item (base + ".png",
- 'See a picture', 'png')
-
-
- list_item (base + '.pdf', 'Print', 'PDF')
- list_item (base + '.midi', 'Listen', 'MIDI')
- list.write ('</ul>\n');
-
- list.write ('</body></html>\n');
- list.close ()
-
-(options, files) = getopt.getopt (sys.argv[1:],
- 'ho:', ['help', 'output='])
-outfile = 'examples.html'
-
-subdirs = []
-for (o, a) in options:
- if o == '--help' or o == '-h':
- help ()
- elif o == '--output' or o == '-o':
- outfile = a
-
-dirs = []
-for f in files:
- dirs += find ('out-www', f)
-
-if not dirs:
- dirs = ['.']
-
-allfiles = []
-
-for d in dirs:
- allfiles += find ('*.ly', d)
-
-allfiles = [f for f in allfiles
- if not f.endswith ('snippet-map.ly')
- and not re.search ('lily-[0-9a-f]+', f)
- and 'musicxml' not in f]
-
-gen_list (allfiles, outfile)
+++ /dev/null
-#!@TARGET_PYTHON@
-import sys
-import optparse
-import os
-import math
-
-## so we can call directly as buildscripts/output-distance.py
-me_path = os.path.abspath (os.path.split (sys.argv[0])[0])
-sys.path.insert (0, me_path + '/../python/')
-sys.path.insert (0, me_path + '/../python/out/')
-
-
-X_AXIS = 0
-Y_AXIS = 1
-INFTY = 1e6
-
-OUTPUT_EXPRESSION_PENALTY = 1
-ORPHAN_GROB_PENALTY = 1
-options = None
-
-################################################################
-# system interface.
-temp_dir = None
-class TempDirectory:
- def __init__ (self):
- import tempfile
- self.dir = tempfile.mkdtemp ()
- print 'dir is', self.dir
- def __del__ (self):
- print 'rm -rf %s' % self.dir
- os.system ('rm -rf %s' % self.dir)
- def __call__ (self):
- return self.dir
-
-
-def get_temp_dir ():
- global temp_dir
- if not temp_dir:
- temp_dir = TempDirectory ()
- return temp_dir ()
-
-def read_pipe (c):
- print 'pipe' , c
- return os.popen (c).read ()
-
-def system (c):
- print 'system' , c
- s = os.system (c)
- if s :
- raise Exception ("failed")
- return
-
-def shorten_string (s):
- threshold = 15
- if len (s) > 2*threshold:
- s = s[:threshold] + '..' + s[-threshold:]
- return s
-
-def max_distance (x1, x2):
- dist = 0.0
-
- for (p,q) in zip (x1, x2):
- dist = max (abs (p-q), dist)
-
- return dist
-
-
-def compare_png_images (old, new, dest_dir):
- def png_dims (f):
- m = re.search ('([0-9]+) x ([0-9]+)', read_pipe ('file %s' % f))
-
- return tuple (map (int, m.groups ()))
-
- dest = os.path.join (dest_dir, new.replace ('.png', '.compare.jpeg'))
- try:
- dims1 = png_dims (old)
- dims2 = png_dims (new)
- except AttributeError:
- ## hmmm. what to do?
- system ('touch %(dest)s' % locals ())
- return
-
- dims = (min (dims1[0], dims2[0]),
- min (dims1[1], dims2[1]))
-
- dir = get_temp_dir ()
- system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop1.png' % (dims + (old, dir)))
- system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop2.png' % (dims + (new, dir)))
-
- system ('compare -depth 8 %(dir)s/crop1.png %(dir)s/crop2.png %(dir)s/diff.png' % locals ())
-
- system ("convert -depth 8 %(dir)s/diff.png -blur 0x3 -negate -channel alpha,blue -type TrueColorMatte -fx 'intensity' %(dir)s/matte.png" % locals ())
-
- system ("composite -compose atop -quality 65 %(dir)s/matte.png %(new)s %(dest)s" % locals ())
-
-
-################################################################
-# interval/bbox arithmetic.
-
-empty_interval = (INFTY, -INFTY)
-empty_bbox = (empty_interval, empty_interval)
-
-def interval_is_empty (i):
- return i[0] > i[1]
-
-def interval_length (i):
- return max (i[1]-i[0], 0)
-
-def interval_union (i1, i2):
- return (min (i1[0], i2[0]),
- max (i1[1], i2[1]))
-
-def interval_intersect (i1, i2):
- return (max (i1[0], i2[0]),
- min (i1[1], i2[1]))
-
-def bbox_is_empty (b):
- return (interval_is_empty (b[0])
- or interval_is_empty (b[1]))
-
-def bbox_union (b1, b2):
- return (interval_union (b1[X_AXIS], b2[X_AXIS]),
- interval_union (b2[Y_AXIS], b2[Y_AXIS]))
-
-def bbox_intersection (b1, b2):
- return (interval_intersect (b1[X_AXIS], b2[X_AXIS]),
- interval_intersect (b2[Y_AXIS], b2[Y_AXIS]))
-
-def bbox_area (b):
- return interval_length (b[X_AXIS]) * interval_length (b[Y_AXIS])
-
-def bbox_diameter (b):
- return max (interval_length (b[X_AXIS]),
- interval_length (b[Y_AXIS]))
-
-
-def difference_area (a, b):
- return bbox_area (a) - bbox_area (bbox_intersection (a,b))
-
-class GrobSignature:
- def __init__ (self, exp_list):
- (self.name, self.origin, bbox_x,
- bbox_y, self.output_expression) = tuple (exp_list)
-
- self.bbox = (bbox_x, bbox_y)
- self.centroid = (bbox_x[0] + bbox_x[1], bbox_y[0] + bbox_y[1])
-
- def __repr__ (self):
- return '%s: (%.2f,%.2f), (%.2f,%.2f)\n' % (self.name,
- self.bbox[0][0],
- self.bbox[0][1],
- self.bbox[1][0],
- self.bbox[1][1])
-
- def axis_centroid (self, axis):
- return apply (sum, self.bbox[axis]) / 2
-
- def centroid_distance (self, other, scale):
- return max_distance (self.centroid, other.centroid) / scale
-
- def bbox_distance (self, other):
- divisor = bbox_area (self.bbox) + bbox_area (other.bbox)
-
- if divisor:
- return (difference_area (self.bbox, other.bbox) +
- difference_area (other.bbox, self.bbox)) / divisor
- else:
- return 0.0
-
- def expression_distance (self, other):
- if self.output_expression == other.output_expression:
- return 0
- else:
- return 1
-
-################################################################
-# single System.
-
-class SystemSignature:
- def __init__ (self, grob_sigs):
- d = {}
- for g in grob_sigs:
- val = d.setdefault (g.name, [])
- val += [g]
-
- self.grob_dict = d
- self.set_all_bbox (grob_sigs)
-
- def set_all_bbox (self, grobs):
- self.bbox = empty_bbox
- for g in grobs:
- self.bbox = bbox_union (g.bbox, self.bbox)
-
- def closest (self, grob_name, centroid):
- min_d = INFTY
- min_g = None
- try:
- grobs = self.grob_dict[grob_name]
-
- for g in grobs:
- d = max_distance (g.centroid, centroid)
- if d < min_d:
- min_d = d
- min_g = g
-
-
- return min_g
-
- except KeyError:
- return None
- def grobs (self):
- return reduce (lambda x,y: x+y, self.grob_dict.values(), [])
-
-################################################################
-## comparison of systems.
-
-class SystemLink:
- def __init__ (self, system1, system2):
- self.system1 = system1
- self.system2 = system2
-
- self.link_list_dict = {}
- self.back_link_dict = {}
-
-
- ## pairs
- self.orphans = []
-
- ## pair -> distance
- self.geo_distances = {}
-
- ## pairs
- self.expression_changed = []
-
- self._geometric_distance = None
- self._expression_change_count = None
- self._orphan_count = None
-
- for g in system1.grobs ():
-
- ## skip empty bboxes.
- if bbox_is_empty (g.bbox):
- continue
-
- closest = system2.closest (g.name, g.centroid)
-
- self.link_list_dict.setdefault (closest, [])
- self.link_list_dict[closest].append (g)
- self.back_link_dict[g] = closest
-
-
- def calc_geometric_distance (self):
- total = 0.0
- for (g1,g2) in self.back_link_dict.items ():
- if g2:
- d = g1.bbox_distance (g2)
- if d:
- self.geo_distances[(g1,g2)] = d
-
- total += d
-
- self._geometric_distance = total
-
- def calc_orphan_count (self):
- count = 0
- for (g1, g2) in self.back_link_dict.items ():
- if g2 == None:
- self.orphans.append ((g1, None))
-
- count += 1
-
- self._orphan_count = count
-
- def calc_output_exp_distance (self):
- d = 0
- for (g1,g2) in self.back_link_dict.items ():
- if g2:
- d += g1.expression_distance (g2)
-
- self._expression_change_count = d
-
- def output_expression_details_string (self):
- return ', '.join ([g1.name for g1 in self.expression_changed])
-
- def geo_details_string (self):
- results = [(d, g1,g2) for ((g1, g2), d) in self.geo_distances.items()]
- results.sort ()
- results.reverse ()
-
- return ', '.join (['%s: %f' % (g1.name, d) for (d, g1, g2) in results])
-
- def orphan_details_string (self):
- return ', '.join (['%s-None' % g1.name for (g1,g2) in self.orphans if g2==None])
-
- def geometric_distance (self):
- if self._geometric_distance == None:
- self.calc_geometric_distance ()
- return self._geometric_distance
-
- def orphan_count (self):
- if self._orphan_count == None:
- self.calc_orphan_count ()
-
- return self._orphan_count
-
- def output_expression_change_count (self):
- if self._expression_change_count == None:
- self.calc_output_exp_distance ()
- return self._expression_change_count
-
- def distance (self):
- return (self.output_expression_change_count (),
- self.orphan_count (),
- self.geometric_distance ())
-
-def read_signature_file (name):
- print 'reading', name
-
- entries = open (name).read ().split ('\n')
- def string_to_tup (s):
- return tuple (map (float, s.split (' ')))
-
- def string_to_entry (s):
- fields = s.split('@')
- fields[2] = string_to_tup (fields[2])
- fields[3] = string_to_tup (fields[3])
-
- return tuple (fields)
-
- entries = [string_to_entry (e) for e in entries
- if e and not e.startswith ('#')]
-
- grob_sigs = [GrobSignature (e) for e in entries]
- sig = SystemSignature (grob_sigs)
- return sig
-
-
-################################################################
-# different systems of a .ly file.
-
-hash_to_original_name = {}
-
-class FileLink:
- def __init__ (self, f1, f2):
- self._distance = None
- self.file_names = (f1, f2)
-
- def text_record_string (self):
- return '%-30f %-20s\n' % (self.distance (),
- self.name ()
- + os.path.splitext (self.file_names[1])[1]
- )
-
- def calc_distance (self):
- return 0.0
-
- def distance (self):
- if self._distance == None:
- self._distance = self.calc_distance ()
-
- return self._distance
-
- def source_file (self):
- for ext in ('.ly', '.ly.txt'):
- base = os.path.splitext (self.file_names[1])[0]
- f = base + ext
- if os.path.exists (f):
- return f
-
- return ''
-
- def name (self):
- base = os.path.basename (self.file_names[1])
- base = os.path.splitext (base)[0]
- base = hash_to_original_name.get (base, base)
- base = os.path.splitext (base)[0]
- return base
-
- def extension (self):
- return os.path.splitext (self.file_names[1])[1]
-
- def link_files_for_html (self, dest_dir):
- for f in self.file_names:
- link_file (f, os.path.join (dest_dir, f))
-
- def get_distance_details (self):
- return ''
-
- def get_cell (self, oldnew):
- return ''
-
- def get_file (self, oldnew):
- return self.file_names[oldnew]
-
- def html_record_string (self, dest_dir):
- dist = self.distance()
-
- details = self.get_distance_details ()
- if details:
- details_base = os.path.splitext (self.file_names[1])[0]
- details_base += '.details.html'
- fn = dest_dir + '/' + details_base
- open_write_file (fn).write (details)
-
- details = '<br>(<a href="%(details_base)s">details</a>)' % locals ()
-
- cell1 = self.get_cell (0)
- cell2 = self.get_cell (1)
-
- name = self.name () + self.extension ()
- file1 = self.get_file (0)
- file2 = self.get_file (1)
-
- return '''<tr>
-<td>
-%(dist)f
-%(details)s
-</td>
-<td>%(cell1)s<br><font size=-2><a href="%(file1)s"><tt>%(name)s</tt></font></td>
-<td>%(cell2)s<br><font size=-2><a href="%(file2)s"><tt>%(name)s</tt></font></td>
-</tr>''' % locals ()
-
-
-class FileCompareLink (FileLink):
- def __init__ (self, f1, f2):
- FileLink.__init__ (self, f1, f2)
- self.contents = (self.get_content (self.file_names[0]),
- self.get_content (self.file_names[1]))
-
-
- def calc_distance (self):
- ## todo: could use import MIDI to pinpoint
- ## what & where changed.
-
- if self.contents[0] == self.contents[1]:
- return 0.0
- else:
- return 100.0;
-
- def get_content (self, f):
- print 'reading', f
- s = open (f).read ()
- return s
-
-
-class GitFileCompareLink (FileCompareLink):
- def get_cell (self, oldnew):
- str = self.contents[oldnew]
-
- # truncate long lines
- str = '\n'.join ([l[:80] for l in str.split ('\n')])
-
-
- str = '<font size="-2"><pre>%s</pre></font>' % str
- return str
-
- def calc_distance (self):
- if self.contents[0] == self.contents[1]:
- d = 0.0
- else:
- d = 1.0001 *options.threshold
-
- return d
-
-
-class TextFileCompareLink (FileCompareLink):
- def calc_distance (self):
- import difflib
- diff = difflib.unified_diff (self.contents[0].strip().split ('\n'),
- self.contents[1].strip().split ('\n'),
- fromfiledate = self.file_names[0],
- tofiledate = self.file_names[1]
- )
-
- self.diff_lines = [l for l in diff]
- self.diff_lines = self.diff_lines[2:]
-
- return math.sqrt (float (len ([l for l in self.diff_lines if l[0] in '-+'])))
-
- def get_cell (self, oldnew):
- str = ''
- if oldnew == 1:
- str = '\n'.join ([d.replace ('\n','') for d in self.diff_lines])
- str = '<font size="-2"><pre>%s</pre></font>' % str
- return str
-
-class LogFileCompareLink (TextFileCompareLink):
- def get_content (self, f):
- c = TextFileCompareLink.get_content (self, f)
- c = re.sub ("\nProcessing `[^\n]+'\n", '', c)
- return c
-
-class ProfileFileLink (FileCompareLink):
- def __init__ (self, f1, f2):
- FileCompareLink.__init__ (self, f1, f2)
- self.results = [{}, {}]
-
- def get_cell (self, oldnew):
- str = ''
- for k in ('time', 'cells'):
- if oldnew==0:
- str += '%-8s: %d\n' % (k, int (self.results[oldnew][k]))
- else:
- str += '%-8s: %8d (%5.3f)\n' % (k, int (self.results[oldnew][k]),
- self.get_ratio (k))
-
- return '<pre>%s</pre>' % str
-
- def get_ratio (self, key):
- (v1,v2) = (self.results[0].get (key, -1),
- self.results[1].get (key, -1))
-
- if v1 <= 0 or v2 <= 0:
- return 0.0
-
- return (v1 - v2) / float (v1+v2)
-
- def calc_distance (self):
- for oldnew in (0,1):
- def note_info (m):
- self.results[oldnew][m.group(1)] = float (m.group (2))
-
- re.sub ('([a-z]+): ([-0-9.]+)\n',
- note_info, self.contents[oldnew])
-
- dist = 0.0
- factor = {
- 'time': 0.1,
- 'cells': 5.0,
- }
-
- for k in ('time', 'cells'):
- real_val = math.tan (self.get_ratio (k) * 0.5 * math.pi)
- dist += math.exp (math.fabs (real_val) * factor[k]) - 1
-
- dist = min (dist, 100)
- return dist
-
-
-class MidiFileLink (TextFileCompareLink):
- def get_content (self, oldnew):
- import midi
-
- data = FileCompareLink.get_content (self, oldnew)
- midi = midi.parse (data)
- tracks = midi[1]
-
- str = ''
- j = 0
- for t in tracks:
- str += 'track %d' % j
- j += 1
-
- for e in t:
- ev_str = repr (e)
- if re.search ('LilyPond [0-9.]+', ev_str):
- continue
-
- str += ' ev %s\n' % `e`
- return str
-
-
-
-class SignatureFileLink (FileLink):
- def __init__ (self, f1, f2 ):
- FileLink.__init__ (self, f1, f2)
- self.system_links = {}
-
- def add_system_link (self, link, number):
- self.system_links[number] = link
-
- def calc_distance (self):
- d = 0.0
-
- orphan_distance = 0.0
- for l in self.system_links.values ():
- d = max (d, l.geometric_distance ())
- orphan_distance += l.orphan_count ()
-
- return d + orphan_distance
-
- def add_file_compare (self, f1, f2):
- system_index = []
-
- def note_system_index (m):
- system_index.append (int (m.group (1)))
- return ''
-
- base1 = re.sub ("-([0-9]+).signature", note_system_index, f1)
- base2 = re.sub ("-([0-9]+).signature", note_system_index, f2)
-
- self.base_names = (os.path.normpath (base1),
- os.path.normpath (base2))
-
- s1 = read_signature_file (f1)
- s2 = read_signature_file (f2)
-
- link = SystemLink (s1, s2)
-
- self.add_system_link (link, system_index[0])
-
-
- def create_images (self, dest_dir):
-
- files_created = [[], []]
- for oldnew in (0, 1):
- pat = self.base_names[oldnew] + '.eps'
-
- for f in glob.glob (pat):
- infile = f
- outfile = (dest_dir + '/' + f).replace ('.eps', '.png')
- data_option = ''
- if options.local_data_dir:
- data_option = ('-slilypond-datadir=%s/../share/lilypond/current '
- % os.path.dirname(infile))
-
- mkdir (os.path.split (outfile)[0])
- cmd = ('gs -sDEVICE=png16m -dGraphicsAlphaBits=4 -dTextAlphaBits=4 '
- ' %(data_option)s '
- ' -r101 '
- ' -sOutputFile=%(outfile)s -dNOSAFER -dEPSCrop -q -dNOPAUSE '
- ' %(infile)s -c quit ') % locals ()
-
- files_created[oldnew].append (outfile)
- system (cmd)
-
- return files_created
-
- def link_files_for_html (self, dest_dir):
- FileLink.link_files_for_html (self, dest_dir)
- to_compare = [[], []]
-
- exts = []
- if options.create_images:
- to_compare = self.create_images (dest_dir)
- else:
- exts += ['.png', '-page*png']
-
- for ext in exts:
- for oldnew in (0,1):
- for f in glob.glob (self.base_names[oldnew] + ext):
- dst = dest_dir + '/' + f
- link_file (f, dst)
-
- if f.endswith ('.png'):
- to_compare[oldnew].append (f)
-
- if options.compare_images:
- for (old, new) in zip (to_compare[0], to_compare[1]):
- compare_png_images (old, new, dest_dir)
-
-
- def get_cell (self, oldnew):
- def img_cell (ly, img, name):
- if not name:
- name = 'source'
- else:
- name = '<tt>%s</tt>' % name
-
- return '''
-<a href="%(img)s">
-<img src="%(img)s" style="border-style: none; max-width: 500px;">
-</a><br>
-''' % locals ()
- def multi_img_cell (ly, imgs, name):
- if not name:
- name = 'source'
- else:
- name = '<tt>%s</tt>' % name
-
- imgs_str = '\n'.join (['''<a href="%s">
-<img src="%s" style="border-style: none; max-width: 500px;">
-</a><br>''' % (img, img)
- for img in imgs])
-
-
- return '''
-%(imgs_str)s
-''' % locals ()
-
-
-
- def cell (base, name):
- pat = base + '-page*.png'
- pages = glob.glob (pat)
-
- if pages:
- return multi_img_cell (base + '.ly', sorted (pages), name)
- else:
- return img_cell (base + '.ly', base + '.png', name)
-
-
-
- str = cell (os.path.splitext (self.file_names[oldnew])[0], self.name ())
- if options.compare_images and oldnew == 1:
- str = str.replace ('.png', '.compare.jpeg')
-
- return str
-
-
- def get_distance_details (self):
- systems = self.system_links.items ()
- systems.sort ()
-
- html = ""
- for (c, link) in systems:
- e = '<td>%d</td>' % c
- for d in link.distance ():
- e += '<td>%f</td>' % d
-
- e = '<tr>%s</tr>' % e
-
- html += e
-
- e = '<td>%d</td>' % c
- for s in (link.output_expression_details_string (),
- link.orphan_details_string (),
- link.geo_details_string ()):
- e += "<td>%s</td>" % s
-
-
- e = '<tr>%s</tr>' % e
- html += e
-
- original = self.name ()
- html = '''<html>
-<head>
-<title>comparison details for %(original)s</title>
-</head>
-<body>
-<table border=1>
-<tr>
-<th>system</th>
-<th>output</th>
-<th>orphan</th>
-<th>geo</th>
-</tr>
-
-%(html)s
-</table>
-
-</body>
-</html>
-''' % locals ()
- return html
-
-
-################################################################
-# Files/directories
-
-import glob
-import re
-
-def compare_signature_files (f1, f2):
- s1 = read_signature_file (f1)
- s2 = read_signature_file (f2)
-
- return SystemLink (s1, s2).distance ()
-
-def paired_files (dir1, dir2, pattern):
- """
- Search DIR1 and DIR2 for PATTERN.
-
- Return (PAIRED, MISSING-FROM-2, MISSING-FROM-1)
-
- """
-
- files = []
- for d in (dir1,dir2):
- found = [os.path.split (f)[1] for f in glob.glob (d + '/' + pattern)]
- found = dict ((f, 1) for f in found)
- files.append (found)
-
- pairs = []
- missing = []
- for f in files[0]:
- try:
- files[1].pop (f)
- pairs.append (f)
- except KeyError:
- missing.append (f)
-
- return (pairs, files[1].keys (), missing)
-
-class ComparisonData:
- def __init__ (self):
- self.result_dict = {}
- self.missing = []
- self.added = []
- self.file_links = {}
-
- def read_sources (self):
-
- ## ugh: drop the .ly.txt
- for (key, val) in self.file_links.items ():
-
- def note_original (match, ln=val):
- key = ln.name ()
- hash_to_original_name[key] = match.group (1)
- return ''
-
- sf = val.source_file ()
- if sf:
- re.sub (r'\\sourcefilename "([^"]+)"',
- note_original, open (sf).read ())
- else:
- print 'no source for', val
-
- def compare_trees (self, dir1, dir2):
- self.compare_directories (dir1, dir2)
-
- (root, dirs, files) = os.walk (dir1).next ()
- for d in dirs:
- d1 = os.path.join (dir1, d)
- d2 = os.path.join (dir2, d)
-
- if os.path.islink (d1) or os.path.islink (d2):
- continue
-
- if os.path.isdir (d2):
- self.compare_trees (d1, d2)
-
- def compare_directories (self, dir1, dir2):
- for ext in ['signature',
- 'midi',
- 'log',
- 'profile',
- 'gittxt']:
- (paired, m1, m2) = paired_files (dir1, dir2, '*.' + ext)
-
- self.missing += [(dir1, m) for m in m1]
- self.added += [(dir2, m) for m in m2]
-
- for p in paired:
- if (options.max_count
- and len (self.file_links) > options.max_count):
- continue
-
- f2 = dir2 + '/' + p
- f1 = dir1 + '/' + p
- self.compare_files (f1, f2)
-
- def compare_files (self, f1, f2):
- if f1.endswith ('signature'):
- self.compare_signature_files (f1, f2)
- else:
- ext = os.path.splitext (f1)[1]
- klasses = {
- '.midi': MidiFileLink,
- '.log' : LogFileCompareLink,
- '.profile': ProfileFileLink,
- '.gittxt': GitFileCompareLink,
- }
-
- if klasses.has_key (ext):
- self.compare_general_files (klasses[ext], f1, f2)
-
- def compare_general_files (self, klass, f1, f2):
- name = os.path.split (f1)[1]
-
- file_link = klass (f1, f2)
- self.file_links[name] = file_link
-
- def compare_signature_files (self, f1, f2):
- name = os.path.split (f1)[1]
- name = re.sub ('-[0-9]+.signature', '', name)
-
- file_link = None
- try:
- file_link = self.file_links[name]
- except KeyError:
- generic_f1 = re.sub ('-[0-9]+.signature', '.ly', f1)
- generic_f2 = re.sub ('-[0-9]+.signature', '.ly', f2)
- file_link = SignatureFileLink (generic_f1, generic_f2)
- self.file_links[name] = file_link
-
- file_link.add_file_compare (f1, f2)
-
- def write_changed (self, dest_dir, threshold):
- (changed, below, unchanged) = self.thresholded_results (threshold)
-
- str = '\n'.join ([os.path.splitext (link.file_names[1])[0]
- for link in changed])
- fn = dest_dir + '/changed.txt'
-
- open_write_file (fn).write (str)
-
- def thresholded_results (self, threshold):
- ## todo: support more scores.
- results = [(link.distance(), link)
- for link in self.file_links.values ()]
- results.sort ()
- results.reverse ()
-
- unchanged = [r for (d,r) in results if d == 0.0]
- below = [r for (d,r) in results if threshold >= d > 0.0]
- changed = [r for (d,r) in results if d > threshold]
-
- return (changed, below, unchanged)
-
- def write_text_result_page (self, filename, threshold):
- out = None
- if filename == '':
- out = sys.stdout
- else:
- print 'writing "%s"' % filename
- out = open_write_file (filename)
-
- (changed, below, unchanged) = self.thresholded_results (threshold)
-
-
- for link in changed:
- out.write (link.text_record_string ())
-
- out.write ('\n\n')
- out.write ('%d below threshold\n' % len (below))
- out.write ('%d unchanged\n' % len (unchanged))
-
- def create_text_result_page (self, dir1, dir2, dest_dir, threshold):
- self.write_text_result_page (dest_dir + '/index.txt', threshold)
-
- def create_html_result_page (self, dir1, dir2, dest_dir, threshold):
- dir1 = dir1.replace ('//', '/')
- dir2 = dir2.replace ('//', '/')
-
- (changed, below, unchanged) = self.thresholded_results (threshold)
-
-
- html = ''
- old_prefix = os.path.split (dir1)[1]
- for link in changed:
- html += link.html_record_string (dest_dir)
-
-
- short_dir1 = shorten_string (dir1)
- short_dir2 = shorten_string (dir2)
- html = '''<html>
-<table rules="rows" border bordercolor="blue">
-<tr>
-<th>distance</th>
-<th>%(short_dir1)s</th>
-<th>%(short_dir2)s</th>
-</tr>
-%(html)s
-</table>
-</html>''' % locals()
-
- html += ('<p>')
- below_count = len (below)
-
- if below_count:
- html += ('<p>%d below threshold</p>' % below_count)
-
- html += ('<p>%d unchanged</p>' % len (unchanged))
-
- dest_file = dest_dir + '/index.html'
- open_write_file (dest_file).write (html)
-
-
- for link in changed:
- link.link_files_for_html (dest_dir)
-
-
- def print_results (self, threshold):
- self.write_text_result_page ('', threshold)
-
-def compare_trees (dir1, dir2, dest_dir, threshold):
- data = ComparisonData ()
- data.compare_trees (dir1, dir2)
- data.read_sources ()
-
-
- data.print_results (threshold)
-
- if os.path.isdir (dest_dir):
- system ('rm -rf %s '% dest_dir)
-
- data.write_changed (dest_dir, threshold)
- data.create_html_result_page (dir1, dir2, dest_dir, threshold)
- data.create_text_result_page (dir1, dir2, dest_dir, threshold)
-
-################################################################
-# TESTING
-
-def mkdir (x):
- if not os.path.isdir (x):
- print 'mkdir', x
- os.makedirs (x)
-
-def link_file (x, y):
- mkdir (os.path.split (y)[0])
- try:
- print x, '->', y
- os.link (x, y)
- except OSError, z:
- print 'OSError', x, y, z
- raise OSError
-
-def open_write_file (x):
- d = os.path.split (x)[0]
- mkdir (d)
- return open (x, 'w')
-
-
-def system (x):
-
- print 'invoking', x
- stat = os.system (x)
- assert stat == 0
-
-
-def test_paired_files ():
- print paired_files (os.environ["HOME"] + "/src/lilypond/scripts/",
- os.environ["HOME"] + "/src/lilypond-stable/buildscripts/", '*.py')
-
-
-def test_compare_trees ():
- system ('rm -rf dir1 dir2')
- system ('mkdir dir1 dir2')
- system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir1')
- system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir2')
- system ('cp 20expr{-*.signature,.ly,.png,.eps,.log,.profile} dir1')
- system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir2/')
- system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir1/')
- system ('cp 19-1.signature 19.sub-1.signature')
- system ('cp 19.ly 19.sub.ly')
- system ('cp 19.profile 19.sub.profile')
- system ('cp 19.log 19.sub.log')
- system ('cp 19.png 19.sub.png')
- system ('cp 19.eps 19.sub.eps')
-
- system ('cp 20multipage* dir1')
- system ('cp 20multipage* dir2')
- system ('cp 19multipage-1.signature dir2/20multipage-1.signature')
-
-
- system ('mkdir -p dir1/subdir/ dir2/subdir/')
- system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir1/subdir/')
- system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir2/subdir/')
- system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir2/')
- system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir1/')
- system ('echo HEAD is 1 > dir1/tree.gittxt')
- system ('echo HEAD is 2 > dir2/tree.gittxt')
-
- ## introduce differences
- system ('cp 19-1.signature dir2/20-1.signature')
- system ('cp 19.profile dir2/20.profile')
- system ('cp 19.png dir2/20.png')
- system ('cp 19multipage-page1.png dir2/20multipage-page1.png')
- system ('cp 20-1.signature dir2/subdir/19.sub-1.signature')
- system ('cp 20.png dir2/subdir/19.sub.png')
- system ("sed 's/: /: 1/g' 20.profile > dir2/subdir/19.sub.profile")
-
- ## radical diffs.
- system ('cp 19-1.signature dir2/20grob-1.signature')
- system ('cp 19-1.signature dir2/20grob-2.signature')
- system ('cp 19multipage.midi dir1/midi-differ.midi')
- system ('cp 20multipage.midi dir2/midi-differ.midi')
- system ('cp 19multipage.log dir1/log-differ.log')
- system ('cp 19multipage.log dir2/log-differ.log && echo different >> dir2/log-differ.log && echo different >> dir2/log-differ.log')
-
- compare_trees ('dir1', 'dir2', 'compare-dir1dir2', options.threshold)
-
-
-def test_basic_compare ():
- ly_template = r"""
-
-\version "2.10.0"
-#(define default-toplevel-book-handler
- print-book-with-defaults-as-systems )
-
-#(ly:set-option (quote no-point-and-click))
-
-\sourcefilename "my-source.ly"
-
-%(papermod)s
-\header { tagline = ##f }
-\score {
-<<
-\new Staff \relative c {
- c4^"%(userstring)s" %(extragrob)s
- }
-\new Staff \relative c {
- c4^"%(userstring)s" %(extragrob)s
- }
->>
-\layout{}
-}
-
-"""
-
- dicts = [{ 'papermod' : '',
- 'name' : '20',
- 'extragrob': '',
- 'userstring': 'test' },
- { 'papermod' : '#(set-global-staff-size 19.5)',
- 'name' : '19',
- 'extragrob': '',
- 'userstring': 'test' },
- { 'papermod' : '',
- 'name' : '20expr',
- 'extragrob': '',
- 'userstring': 'blabla' },
- { 'papermod' : '',
- 'name' : '20grob',
- 'extragrob': 'r2. \\break c1',
- 'userstring': 'test' },
- ]
-
- for d in dicts:
- open (d['name'] + '.ly','w').write (ly_template % d)
-
- names = [d['name'] for d in dicts]
-
- system ('lilypond -ddump-profile -dseparate-log-files -ddump-signatures --png -dbackend=eps ' + ' '.join (names))
-
-
- multipage_str = r'''
- #(set-default-paper-size "a6")
- \score {
- \relative {c1 \pageBreak c1 }
- \layout {}
- \midi {}
- }
- '''
-
- open ('20multipage.ly', 'w').write (multipage_str.replace ('c1', 'd1'))
- open ('19multipage.ly', 'w').write ('#(set-global-staff-size 19.5)\n' + multipage_str)
- system ('lilypond -dseparate-log-files -ddump-signatures --png 19multipage 20multipage ')
-
- test_compare_signatures (names)
-
-def test_compare_signatures (names, timing=False):
- import time
-
- times = 1
- if timing:
- times = 100
-
- t0 = time.clock ()
-
- count = 0
- for t in range (0, times):
- sigs = dict ((n, read_signature_file ('%s-1.signature' % n)) for n in names)
- count += 1
-
- if timing:
- print 'elapsed', (time.clock() - t0)/count
-
-
- t0 = time.clock ()
- count = 0
- combinations = {}
- for (n1, s1) in sigs.items():
- for (n2, s2) in sigs.items():
- combinations['%s-%s' % (n1, n2)] = SystemLink (s1,s2).distance ()
- count += 1
-
- if timing:
- print 'elapsed', (time.clock() - t0)/count
-
- results = combinations.items ()
- results.sort ()
- for k,v in results:
- print '%-20s' % k, v
-
- assert combinations['20-20'] == (0.0,0.0,0.0)
- assert combinations['20-20expr'][0] > 0.0
- assert combinations['20-19'][2] < 10.0
- assert combinations['20-19'][2] > 0.0
-
-
-def run_tests ():
- dir = 'test-output-distance'
-
- do_clean = not os.path.exists (dir)
-
- print 'test results in ', dir
- if do_clean:
- system ('rm -rf ' + dir)
- system ('mkdir ' + dir)
-
- os.chdir (dir)
- if do_clean:
- test_basic_compare ()
-
- test_compare_trees ()
-
-################################################################
-#
-
-def main ():
- p = optparse.OptionParser ("output-distance - compare LilyPond formatting runs")
- p.usage = 'output-distance.py [options] tree1 tree2'
-
- p.add_option ('', '--test-self',
- dest="run_test",
- action="store_true",
- help='run test method')
-
- p.add_option ('--max-count',
- dest="max_count",
- metavar="COUNT",
- type="int",
- default=0,
- action="store",
- help='only analyze COUNT signature pairs')
-
- p.add_option ('', '--threshold',
- dest="threshold",
- default=0.3,
- action="store",
- type="float",
- help='threshold for geometric distance')
-
- p.add_option ('--no-compare-images',
- dest="compare_images",
- default=True,
- action="store_false",
- help="Don't run graphical comparisons")
-
- p.add_option ('--create-images',
- dest="create_images",
- default=False,
- action="store_true",
- help="Create PNGs from EPSes")
-
-
- p.add_option ('--local-datadir',
- dest="local_data_dir",
- default=False,
- action="store_true",
- help='whether to use the share/lilypond/ directory in the test directory')
-
- p.add_option ('-o', '--output-dir',
- dest="output_dir",
- default=None,
- action="store",
- type="string",
- help='where to put the test results [tree2/compare-tree1tree2]')
-
- global options
- (options, args) = p.parse_args ()
-
- if options.run_test:
- run_tests ()
- sys.exit (0)
-
- if len (args) != 2:
- p.print_usage()
- sys.exit (2)
-
- name = options.output_dir
- if not name:
- name = args[0].replace ('/', '')
- name = os.path.join (args[1], 'compare-' + shorten_string (name))
-
- compare_trees (args[0], args[1], name, options.threshold)
-
-if __name__ == '__main__':
- main()
-
+++ /dev/null
-#!@FONTFORGE@
-
-Open($1);
-MergeKern($2)
-
-
-# The AFM files of `New Century Schoolbook' family as distributed within the
-# urw-fonts-1.0.7pre41.tar.bz2 archive contain a bunch of ligatures which
-# shouldn't be active by default:
-#
-# T + M -> trademark
-# N + o -> afii61352
-# i + j -> ij
-# I + J -> IJ
-#
-# This font bundle is shipped by Fedora Core 6 and other GNU/Linux
-# distributions; we simply remove those ligatures.
-
-SelectIf("trademark", "trademark", \
- "afii61352", "afii61352", \
- "ij", "ij", \
- "IJ", "IJ");
-if (Strtol($version) < 20070501)
- RemoveATT("Ligature", "*", "*");
-else
- RemovePosSub("*");
-endif
-
-Generate($3 + $fontname + ".otf");
-
-# EOF
+++ /dev/null
-#!@PYTHON@
-
-"""
-Postprocess HTML files:
-add footer, tweak links, add language selection menu.
-"""
-import re
-import os
-import time
-import operator
-
-import langdefs
-
-# This is to try to make the docball not too big with almost duplicate files
-# see process_links()
-non_copied_pages = ['Documentation/user/out-www/lilypond-big-page',
- 'Documentation/user/out-www/lilypond-internals-big-page',
- 'Documentation/user/out-www/lilypond-learning-big-page',
- 'Documentation/user/out-www/lilypond-program-big-page',
- 'Documentation/user/out-www/music-glossary-big-page',
- 'out-www/examples',
- 'Documentation/topdocs',
- 'Documentation/bibliography',
- 'Documentation/out-www/THANKS',
- 'Documentation/out-www/DEDICATION',
- 'Documentation/out-www/devel',
- 'input/']
-
-def _doc (s):
- return s
-
-header = r"""
-"""
-
-footer = '''
-<div class="footer">
-<p class="footer_version">
-%(footer_name_version)s
-</p>
-<p class="footer_report">
-%(footer_report_links)s
-</p>
-</div>
-'''
-footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).')
-# ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process
-footer_report_links = _doc ('Your <a href="%(suggest_Docs_url)s">suggestions for the documentation</a> are welcome, please report errors to our <a href="%(mail_address_url)s">bug list</a>.')
-
-
-mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs'
-suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding'
-
-header_tag = '<!-- header_tag -->'
-header_tag_re = re.compile (header_tag)
-
-footer_tag = '<!-- footer_tag -->'
-footer_tag_re = re.compile (footer_tag)
-
-lang_available = _doc ("Other languages: %s.")
-browser_lang = _doc ('About <A HREF="%s">automatic language selection</A>.')
-browser_language_url = "/web/about/browser-language"
-
-LANGUAGES_TEMPLATE = '''
-<p id="languages">
- %(language_available)s
- <br/>
- %(browser_language)s
-</p>
-'''
-
-
-html_re = re.compile ('(.*?)(?:[.]([^/.]*))?[.]html$')
-pages_dict = {}
-
-def build_pages_dict (filelist):
- """Build dictionary of available translations of each page"""
- global pages_dict
- for f in filelist:
- m = html_re.match (f)
- if m:
- g = m.groups()
- if len (g) <= 1 or g[1] == None:
- e = ''
- else:
- e = g[1]
- if not g[0] in pages_dict:
- pages_dict[g[0]] = [e]
- else:
- pages_dict[g[0]].append (e)
-
-def source_links_replace (m, source_val):
- return 'href="' + os.path.join (source_val, m.group (1)) + '"'
-
-splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|\
-Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|\
-lilypond-learning))/')
-
-snippets_ref_re = re.compile (r'href="(\.\./)?lilypond-snippets')
-user_ref_re = re.compile ('href="(?:\.\./)?lilypond\
-(-internals|-learning|-program|(?!-snippets))')
-
-docindex_link_re = re.compile (r'href="index.html"')
-
-
-## Windows does not support symlinks.
-# This function avoids creating symlinks for splitted HTML manuals
-# Get rid of symlinks in GNUmakefile.in (local-WWW-post)
-# this also fixes missing PNGs only present in translated docs
-def hack_urls (s, prefix):
- if splitted_docs_re.match (prefix):
- s = re.sub ('(href|src)="(../lily-.*?|.*?[.]png)"', '\\1="../\\2"', s)
-
- # fix xrefs between documents in different directories ad hoc
- if 'user/out-www/lilypond' in prefix:
- s = snippets_ref_re.sub ('href="source/input/lsr/lilypond-snippets', s)
- elif 'input/lsr' in prefix:
- s = user_ref_re.sub ('href="source/Documentation/user/lilypond\\1', s)
-
- # we also need to replace in the lsr, which is already processed above!
- if 'input/' in prefix or 'Documentation/topdocs' in prefix:
- # fix the link from the regtest, lsr and topdoc pages to the doc index
- # (rewrite prefix to obtain the relative path of the doc index page)
- rel_link = re.sub (r'out-www/.*$', '', prefix)
- rel_link = re.sub (r'[^/]*/', '../', rel_link)
- if 'input/regression' in prefix:
- indexfile = "Documentation/devel"
- else:
- indexfile = "index"
- s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s)
-
- source_path = os.path.join (os.path.dirname (prefix), 'source')
- if not os.path.islink (source_path):
- return s
- source_val = os.readlink (source_path)
- return re.sub ('href="source/(.*?)"', lambda m: source_links_replace (m, source_val), s)
-
-body_tag_re = re.compile ('(?i)<body([^>]*)>')
-html_tag_re = re.compile ('(?i)<html>')
-doctype_re = re.compile ('(?i)<!DOCTYPE')
-doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
-css_re = re.compile ('(?i)<link rel="stylesheet" type="text/css" ([^>]*)href="[^">]*?lilypond.*\.css"([^>]*)>')
-end_head_tag_re = re.compile ('(?i)</head>')
-css_link = """ <link rel="stylesheet" type="text/css" title="Patrick McCarty's design" href="%(rel)sDocumentation/lilypond-mccarty.css">
- <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond.css" title="Andrew Hawryluk's design">
- <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond-blue.css" title="Kurt Kroon's blue design">
- <!--[if lte IE 7]>
- <link href="%(rel)sDocumentation/lilypond-ie-fixes.css" rel="stylesheet" type="text/css">
- <![endif]-->
-"""
-
-
-def add_header (s, prefix):
- """Add header (<body>, doctype and CSS)"""
- if header_tag_re.search (s) == None:
- body = '<body\\1>'
- (s, n) = body_tag_re.subn (body + header, s, 1)
- if not n:
- (s, n) = html_tag_re.subn ('<html>' + header, s, 1)
- if not n:
- s = header + s
-
- s = header_tag + '\n' + s
-
- if doctype_re.search (s) == None:
- s = doctype + s
-
- if css_re.search (s) == None:
- depth = (prefix.count ('/') - 1) * '../'
- s = end_head_tag_re.sub ((css_link % {'rel': depth}) + '</head>', s)
- return s
-
-title_tag_re = re.compile ('.*?<title>(.*?)</title>', re.DOTALL)
-AT_web_title_re = re.compile ('@WEB-TITLE@')
-
-def add_title (s):
- # urg
- # maybe find first node?
- fallback_web_title = '-- --'
- m = title_tag_re.match (s)
- if m:
- fallback_web_title = m.group (1)
- s = AT_web_title_re.sub (fallback_web_title, s)
- return s
-
-footer_insert_re = re.compile ('<!--\s*FOOTER\s*-->')
-end_body_re = re.compile ('(?i)</body>')
-end_html_re = re.compile ('(?i)</html>')
-
-def add_footer (s, footer_text):
- """add footer"""
- (s, n) = footer_insert_re.subn (footer_text + '\n' + '<!-- FOOTER -->', s, 1)
- if not n:
- (s, n) = end_body_re.subn (footer_text + '\n' + '</body>', s, 1)
- if not n:
- (s, n) = end_html_re.subn (footer_text + '\n' + '</html>', s, 1)
- if not n:
- s += footer_text + '\n'
- return s
-
-def find_translations (prefix, lang_ext):
- """find available translations of a page"""
- available = []
- missing = []
- for l in langdefs.LANGUAGES:
- e = l.webext
- if lang_ext != e:
- if e in pages_dict[prefix]:
- available.append (l)
- elif lang_ext == '' and l.enabled and reduce (operator.and_,
- [not prefix.startswith (s)
- for s in non_copied_pages]):
- # English version of missing translated pages will be written
- missing.append (e)
- return available, missing
-
-online_links_re = re.compile ('''(href|src)=['"]\
-((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\
-([.]html)(#[^"']*|)['"]''')
-offline_links_re = re.compile ('href=[\'"]\
-((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
-big_page_name_re = re.compile ('''(.+?)-big-page''')
-
-def process_i18n_big_page_links (match, prefix, lang_ext):
- big_page_name = big_page_name_re.match (match.group (1))
- if big_page_name:
- destination_path = os.path.normpath (os.path.join (os.path.dirname (prefix),
- big_page_name.group (0)))
- if not lang_ext in pages_dict[destination_path]:
- return match.group (0)
- return 'href="' + match.group (1) + '.' + lang_ext \
- + match.group (2) + match.group (3) + '"'
-
-def process_links (s, prefix, lang_ext, file_name, missing, target):
- page_flavors = {}
- if target == 'online':
- # Strip .html, suffix for auto language selection (content
- # negotiation). The menu must keep the full extension, so do
- # this before adding the menu.
- page_flavors[file_name] = \
- [lang_ext, online_links_re.sub ('\\1="\\2\\4"', s)]
- elif target == 'offline':
- # in LANG doc index: don't rewrite .html suffixes
- # as not all .LANG.html pages exist;
- # the doc index should be translated and contain links with the right suffixes
- if prefix == 'Documentation/out-www/index':
- page_flavors[file_name] = [lang_ext, s]
- elif lang_ext == '':
- page_flavors[file_name] = [lang_ext, s]
- for e in missing:
- page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = \
- [e, offline_links_re.sub ('href="\\1.' + e + '\\2\\3"', s)]
- else:
- # For saving bandwidth and disk space, we don't duplicate big pages
- # in English, so we must process translated big pages links differently.
- if 'big-page' in prefix:
- page_flavors[file_name] = \
- [lang_ext,
- offline_links_re.sub \
- (lambda match: process_i18n_big_page_links (match, prefix, lang_ext),
- s)]
- else:
- page_flavors[file_name] = \
- [lang_ext,
- offline_links_re.sub ('href="\\1.' + lang_ext + '\\2\\3"', s)]
- return page_flavors
-
-def add_menu (page_flavors, prefix, available, target, translation):
- for k in page_flavors:
- language_menu = ''
- languages = ''
- if page_flavors[k][0] != '':
- t = translation[page_flavors[k][0]]
- else:
- t = _doc
- for lang in available:
- lang_file = lang.file_name (os.path.basename (prefix), '.html')
- if language_menu != '':
- language_menu += ', '
- language_menu += '<a href="%s">%s</a>' % (lang_file, t (lang.name))
- if target == 'offline':
- browser_language = ''
- elif target == 'online':
- browser_language = t (browser_lang) % browser_language_url
- if language_menu:
- language_available = t (lang_available) % language_menu
- languages = LANGUAGES_TEMPLATE % vars ()
- page_flavors[k][1] = add_footer (page_flavors[k][1], languages)
- return page_flavors
-
-
-def process_html_files (package_name = '',
- package_version = '',
- target = 'offline',
- name_filter = lambda s: s):
- """Add header, footer and tweak links to a number of HTML files
-
- Arguments:
- package_name=NAME set package_name to NAME
- package_version=VERSION set package version to VERSION
- targets=offline|online set page processing depending on the target
- offline is for reading HTML pages locally
- online is for hosting the HTML pages on a website with content
- negotiation
- name_filter a HTML file name filter
- """
- translation = langdefs.translation
- localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
-
- if "http://" in mail_address:
- mail_address_url = mail_address
- else:
- mail_address_url= 'mailto:' + mail_address
-
- versiontup = package_version.split ('.')
- branch_str = _doc ('stable-branch')
- if int (versiontup[1]) % 2:
- branch_str = _doc ('development-branch')
-
- # Initialize dictionaries for string formatting
- subst = {}
- subst[''] = dict ([i for i in globals ().items() if type (i[1]) is str])
- subst[''].update (dict ([i for i in locals ().items() if type (i[1]) is str]))
- for l in translation:
- e = langdefs.LANGDICT[l].webext
- if e:
- subst[e] = {}
- for name in subst['']:
- subst[e][name] = translation[l] (subst[''][name])
- # Do deeper string formatting as early as possible,
- # so only one '%' formatting pass is needed later
- for e in subst:
- subst[e]['footer_name_version'] = subst[e]['footer_name_version'] % subst[e]
- subst[e]['footer_report_links'] = subst[e]['footer_report_links'] % subst[e]
-
- for prefix, ext_list in pages_dict.items ():
- for lang_ext in ext_list:
- file_name = langdefs.lang_file_name (prefix, lang_ext, '.html')
- in_f = open (file_name)
- s = in_f.read()
- in_f.close()
-
- s = s.replace ('%', '%%')
- s = hack_urls (s, prefix)
- s = add_header (s, prefix)
-
- ### add footer
- if footer_tag_re.search (s) == None:
- s = add_footer (s, footer_tag + footer)
-
- available, missing = find_translations (prefix, lang_ext)
- page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target)
- # Add menu after stripping: must not have autoselection for language menu.
- page_flavors = add_menu (page_flavors, prefix, available, target, translation)
- for k in page_flavors:
- page_flavors[k][1] = page_flavors[k][1] % subst[page_flavors[k][0]]
- out_f = open (name_filter (k), 'w')
- out_f.write (page_flavors[k][1])
- out_f.close()
- # if the page is translated, a .en.html symlink is necessary for content negotiation
- if target == 'online' and ext_list != ['']:
- os.symlink (os.path.basename (prefix) + '.html', name_filter (prefix + '.en.html'))
+++ /dev/null
-#! @PYTHON@
-
-import os
-import re
-import sys
-
-frm = re.compile (sys.argv[1], re.MULTILINE)
-to = sys.argv[2]
-
-if not sys.argv[3:] or sys.argv[3] == '-':
- sys.stdout.write (re.sub (frm, to, sys.stdin.read ()))
-for file in sys.argv[3:]:
- s = open (file).read ()
- name = os.path.basename (file)
- base, ext = os.path.splitext (name)
- t = re.sub (frm, to % locals (), s)
- if s != t:
- if 1:
- os.system ('mv %(file)s %(file)s~~' % locals ())
- h = open (file, "w")
- h.write (t)
- h.close ()
- else:
- sys.stdout.write (t)
+++ /dev/null
-#!/usr/bin/env python
-import os
-import sys
-
-for i in sys.argv[1:]:
- print os.path.realpath (i)
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# tely-gettext.py
-
-# Temporary script that helps translated docs sources conversion
-# for texi2html processing
-
-# USAGE: tely-gettext.py BUILDSCRIPT-DIR LOCALEDIR LANG FILES
-
-print "tely_gettext.py"
-
-import sys
-import re
-import os
-import gettext
-
-if len (sys.argv) > 3:
- buildscript_dir, localedir, lang = sys.argv[1:4]
-else:
- print """USAGE: tely-gettext.py BUILDSCRIPT-DIR LOCALEDIR LANG FILES
- For example buildscripts/tely-gettext.py buildscripts Documentation/po/out-www de Documentation/de/user/*.tely"""
- sys.exit (1)
-
-sys.path.append (buildscript_dir)
-import langdefs
-
-double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
-t = gettext.translation('lilypond-doc', localedir, [lang])
-_doc = t.gettext
-
-include_re = re.compile (r'@include (.*?)$', re.M)
-whitespaces = re.compile (r'\s+')
-ref_re = re.compile (r'(?ms)@(ruser|rprogram|ref|rlearning)\{(.*?)\}')
-node_section_re = re.compile (r'@node (.*?)\n@((?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) (.*?)\n')
-menu_entry_re = re.compile (r'\* (.*?)::')
-
-def ref_gettext (m):
- r = whitespaces.sub (' ', m.group (2))
- return '@' + m.group (1) + '{' + _doc (r) + '}'
-
-def node_gettext (m):
- return '@node ' + _doc (m.group (1)) + '\n@' + \
- m.group (2) + ' ' + _doc (m.group (3)) + \
- '\n@translationof ' + m.group (1) + '\n'
-
-def menu_entry_gettext (m):
- return '* ' + _doc (m.group (1)) + '::'
-
-def process_file (filename):
- print "Processing %s" % filename
- f = open (filename, 'r')
- page = f.read ()
- f.close()
- page = node_section_re.sub (node_gettext, page)
- page = ref_re.sub (ref_gettext, page)
- page = menu_entry_re.sub (menu_entry_gettext, page)
- page = page.replace ("""-- SKELETON FILE --
-When you actually translate this file, please remove these lines as
-well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", """@c -- SKELETON FILE --""")
- page = page.replace ('UNTRANSLATED NODE: IGNORE ME', "@c UNTRANSLATED NODE: IGNORE ME")
- includes = [whitespaces.sub ('', f) for f in include_re.findall (page)]
- f = open (filename, 'w')
- f.write (page)
- f.close ()
- dir = os.path.dirname (filename)
- for file in includes:
- p = os.path.join (dir, file)
- if os.path.exists (p):
- process_file (p)
-
-for filename in sys.argv[4:]:
- process_file (filename)
+++ /dev/null
-#!@PYTHON@
-# -*- coding: utf-8 -*-
-# texi-gettext.py
-
-# USAGE: texi-gettext.py [-o OUTDIR] LANG FILES
-#
-# -o OUTDIR specifies that output files should rather be written in OUTDIR
-#
-
-print "texi_gettext.py"
-
-import sys
-import re
-import os
-import getopt
-
-import langdefs
-
-optlist, args = getopt.getopt (sys.argv[1:],'o:')
-lang = args[0]
-files = args[1:]
-
-outdir = '.'
-for x in optlist:
- if x[0] == '-o':
- outdir = x[1]
-
-double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
-_doc = langdefs.translation[lang]
-
-include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M)
-whitespaces = re.compile (r'\s+')
-ref_re = re.compile (r'(?ms)@(rglos|ruser|rprogram|ref)(\{)(.*?)(\})')
-node_section_re = re.compile (r'@(node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading)( )(.*?)(\n)')
-menu_entry_re = re.compile (r'\* (.*?)::')
-
-def title_gettext (m):
- if m.group (2) == '{':
- r = whitespaces.sub (' ', m.group (3))
- else:
- r = m.group (3)
- return '@' + m.group (1) + m.group (2) + _doc (r) + m.group (4)
-
-def menu_entry_gettext (m):
- return '* ' + _doc (m.group (1)) + '::'
-
-def include_replace (m, filename):
- if os.path.exists (os.path.join (os.path.dirname (filename), m.group(1)) + '.texi'):
- return '@include ' + m.group(1) + '.pdftexi'
- return m.group(0)
-
-def process_file (filename):
- print "Processing %s" % filename
- f = open (filename, 'r')
- page = f.read ()
- f.close()
- page = node_section_re.sub (title_gettext, page)
- page = ref_re.sub (title_gettext, page)
- page = menu_entry_re.sub (menu_entry_gettext, page)
- page = page.replace ("""-- SKELETON FILE --
-When you actually translate this file, please remove these lines as
-well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", '')
- page = page.replace ('UNTRANSLATED NODE: IGNORE ME', _doc ("This section has not been translated yet; please refer to the manual in English."))
- includes = include_re.findall (page)
- page = include_re.sub (lambda m: include_replace (m, filename), page)
- p = os.path.join (outdir, filename) [:-4] + 'pdftexi'
- f = open (p, 'w')
- f.write (page)
- f.close ()
- dir = os.path.dirname (filename)
- for file in includes:
- p = os.path.join (dir, file) + '.texi'
- if os.path.exists (p):
- process_file (p)
-
-for filename in files:
- process_file (filename)
+++ /dev/null
-#!@PYTHON@
-# texi-langutils.py
-
-# WARNING: this script can't find files included in a different directory
-
-import sys
-import re
-import getopt
-import os
-
-import langdefs
-
-def read_pipe (command):
- print command
- pipe = os.popen (command)
- output = pipe.read ()
- if pipe.close ():
- print "pipe failed: %(command)s" % locals ()
- return output
-
-
-optlist, texi_files = getopt.getopt(sys.argv[1:],'no:d:b:i:l:',['skeleton', 'gettext'])
-process_includes = not ('-n', '') in optlist # -n don't process @include's in texinfo files
-
-make_gettext = ('--gettext', '') in optlist # --gettext generate a node list from a Texinfo source
-make_skeleton = ('--skeleton', '') in optlist # --skeleton extract the node tree from a Texinfo source
-
-output_file = 'doc.pot'
-
-# @untranslated should be defined as a macro in Texinfo source
-node_blurb = '''@untranslated
-'''
-doclang = ''
-head_committish = read_pipe ('git-rev-parse HEAD')
-intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*-
-@c This file is part of %(topfile)s
-@ignore
- Translation of GIT committish: %(head_committish)s
- When revising a translation, copy the HEAD committish of the
- version that you are working on. See TRANSLATION for details.
-@end ignore
-'''
-
-end_blurb = """
-@c -- SKELETON FILE --
-"""
-
-for x in optlist:
- if x[0] == '-o': # -o NAME set PO output file name to NAME
- output_file = x[1]
- elif x[0] == '-d': # -d DIR set working directory to DIR
- os.chdir (x[1])
- elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB
- node_blurb = x[1]
- elif x[0] == '-i': # -i BLURB set blurb written at beginning of each file to BLURB
- intro_blurb = x[1]
- elif x[0] == '-l': # -l ISOLANG set documentlanguage to ISOLANG
- doclang = '; documentlanguage: ' + x[1]
-
-texinfo_with_menus_re = re.compile (r"^(\*) +([^:\n]+)::.*?$|^@(include|menu|end menu|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.*?)$|@(rglos){(.+?)}", re.M)
-
-texinfo_re = re.compile (r"^@(include|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.+?)$|@(rglos){(.+?)}", re.M)
-
-ly_string_re = re.compile (r'^([a-zA-Z]+)[\t ]*=|%+[\t ]*(.*)$|\\(?:new|context)\s+(?:[a-zA-Z]*?(?:Staff(?:Group)?|Voice|FiguredBass|FretBoards|Names|Devnull))\s+=\s+"?([a-zA-Z]+)"?\s+')
-lsr_verbatim_ly_re = re.compile (r'% begin verbatim$')
-texinfo_verbatim_ly_re = re.compile (r'^@lilypond\[.*?verbatim')
-
-def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile, output_file=None, scan_ly=False):
- try:
- f = open (texifilename, 'r')
- texifile = f.read ()
- f.close ()
- printedfilename = texifilename.replace ('../','')
- includes = []
-
- # process ly var names and comments
- if output_file and (scan_ly or texifilename.endswith ('.ly')):
- lines = texifile.splitlines ()
- i = 0
- in_verb_ly_block = False
- if texifilename.endswith ('.ly'):
- verbatim_ly_re = lsr_verbatim_ly_re
- else:
- verbatim_ly_re = texinfo_verbatim_ly_re
- for i in range (len (lines)):
- if verbatim_ly_re.search (lines[i]):
- in_verb_ly_block = True
- elif lines[i].startswith ('@end lilypond'):
- in_verb_ly_block = False
- elif in_verb_ly_block:
- for (var, comment, context_id) in ly_string_re.findall (lines[i]):
- if var:
- output_file.write ('# ' + printedfilename + ':' + \
- str (i + 1) + ' (variable)\n_(r"' + var + '")\n')
- elif comment:
- output_file.write ('# ' + printedfilename + ':' + \
- str (i + 1) + ' (comment)\n_(r"' + \
- comment.replace ('"', '\\"') + '")\n')
- elif context_id:
- output_file.write ('# ' + printedfilename + ':' + \
- str (i + 1) + ' (context id)\n_(r"' + \
- context_id + '")\n')
-
- # process Texinfo node names and section titles
- if write_skeleton:
- g = open (os.path.basename (texifilename), 'w')
- subst = globals ()
- subst.update (locals ())
- g.write (i_blurb % subst)
- tutu = texinfo_with_menus_re.findall (texifile)
- node_trigger = False
- for item in tutu:
- if item[0] == '*':
- g.write ('* ' + item[1] + '::\n')
- elif output_file and item[4] == 'rglos':
- output_file.write ('_(r"' + item[5] + '") # @rglos in ' + printedfilename + '\n')
- elif item[2] == 'menu':
- g.write ('@menu\n')
- elif item[2] == 'end menu':
- g.write ('@end menu\n\n')
- else:
- g.write ('@' + item[2] + ' ' + item[3] + '\n')
- if node_trigger:
- g.write (n_blurb)
- node_trigger = False
- elif item[2] == 'include':
- includes.append (item[3])
- else:
- if output_file:
- output_file.write ('# @' + item[2] + ' in ' + \
- printedfilename + '\n_(r"' + item[3].strip () + '")\n')
- if item[2] == 'node':
- node_trigger = True
- g.write (end_blurb)
- g.close ()
-
- elif output_file:
- toto = texinfo_re.findall (texifile)
- for item in toto:
- if item[0] == 'include':
- includes.append(item[1])
- elif item[2] == 'rglos':
- output_file.write ('# @rglos in ' + printedfilename + '\n_(r"' + item[3] + '")\n')
- else:
- output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip () + '")\n')
-
- if process_includes:
- dir = os.path.dirname (texifilename)
- for item in includes:
- process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb, write_skeleton, topfile, output_file, scan_ly)
- except IOError, (errno, strerror):
- sys.stderr.write ("I/O error(%s): %s: %s\n" % (errno, texifilename, strerror))
-
-
-if intro_blurb != '':
- intro_blurb += '\n\n'
-if node_blurb != '':
- node_blurb = '\n' + node_blurb + '\n\n'
-if make_gettext:
- node_list_filename = 'node_list'
- node_list = open (node_list_filename, 'w')
- node_list.write ('# -*- coding: utf-8 -*-\n')
- for texi_file in texi_files:
- # Urgly: scan ly comments and variable names only in English doco
- is_english_doc = 'Documentation/user' in texi_file
- process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
- os.path.basename (texi_file), node_list,
- scan_ly=is_english_doc)
- for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'):
- node_list.write ('_(r"' + word + '")\n')
- node_list.close ()
- os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename)
-else:
- for texi_file in texi_files:
- process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
- os.path.basename (texi_file))
+++ /dev/null
-#!@PYTHON@
-# texi-skeleton-update.py
-
-import sys
-import glob
-import os
-import shutil
-
-sys.stderr.write ('texi-skeleton-update.py\n')
-
-orig_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[1] + '/*.ite??')])
-new_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[2] + '/*.ite??')])
-
-for f in new_skeletons:
- if f in orig_skeletons:
- g = open (os.path.join (sys.argv[1], f), 'r').read ()
- if '-- SKELETON FILE --' in g:
- sys.stderr.write ("Updating %s...\n" % f)
- shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
- elif f != 'fdl.itexi':
- sys.stderr.write ("Copying new file %s...\n" % f)
- shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
-
-for f in orig_skeletons.difference (new_skeletons):
- sys.stderr.write ("Warning: outdated skeleton file %s\n" % f)
+++ /dev/null
-#!@PYTHON@
-
-import getopt
-import os
-import re
-import sys
-import time
-
-def usage ():
- sys.stderr.write ('''
-texi2omf [options] FILE.texi > FILE.omf
-
-Options:
-
---format=FORM set format FORM (HTML, PS, PDF, [XML]).
---location=FILE file name as installed on disk.
---version=VERSION
-
-Use the following commands (enclose in @ignore)
-
-@omfsubject . .
-@omfdescription . .
-@omftype . .
-
-etc.
-
-
-''')
-
-(options, files) = getopt.getopt (sys.argv[1:], '',
- ['format=', 'location=', 'version='])
-
-license = 'FDL'
-location = ''
-version = ''
-email = os.getenv ('MAILADDRESS')
-name = os.getenv ('USERNAME')
-format = 'xml'
-
-for (o, a) in options:
- if o == '--format':
- format = a
- elif o == '--location':
- location = 'file:%s' % a
- elif o == '--version':
- version = a
- else:
- assert 0
-
-
-if not files:
- usage ()
- sys.exit (2)
-
-
-formats = {
- 'html' : 'text/html',
- 'pdf' : 'application/pdf',
- 'ps.gz' : 'application/postscript',
- 'ps' : 'application/postscript',
- 'xml' : 'text/xml',
- }
-
-if not formats.has_key (format):
- sys.stderr.write ("Format `%s' unknown\n" % format)
- sys.exit (1)
-
-
-infile = files[0]
-
-today = time.localtime ()
-
-texi = open (infile).read ()
-
-if not location:
- location = 'file:/%s' % re.sub (r'\..*', '.' + format, infile)
-
-omf_vars = {
- 'date': '%d-%d-%d' % today[:3],
- 'mimeformat': formats[format],
- 'maintainer': "%s (%s)" % (name, email),
- 'version' : version,
- 'location' : location,
- 'language' : 'C',
- }
-
-omf_caterories = ['subject', 'creator', 'maintainer', 'contributor',
- 'title', 'subtitle', 'version', 'category', 'type',
- 'description', 'license', 'language',]
-
-for a in omf_caterories:
- m = re.search ('@omf%s (.*)\n'% a, texi)
- if m:
- omf_vars[a] = m.group (1)
- elif not omf_vars.has_key (a):
- omf_vars[a] = ''
-
-if not omf_vars['title']:
- title = ''
- m = re.search ('@title (.*)\n', texi)
- if m:
- title = m.group (1)
-
- subtitle = ''
- m = re.search ('@subtitle (.*)\n', texi)
- if m:
- subtitle = m.group (1)
-
- if subtitle:
- title = '%s -- %s' % (title, subtitle)
-
- omf_vars['title'] = title
-
-if not omf_vars['creator']:
- m = re.search ('@author (.*)\n', texi)
- if m:
- omf_vars['creator'] = m.group (1)
-
-
-
-print r'''<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE omf PUBLIC "-//OMF//DTD Scrollkeeper OMF Variant V1.0//EN" "http://scrollkeeper.sourceforge.net/dtds/scrollkeeper-omf-1.0/scrollkeeper-omf.dtd">
-<omf>
- <resource>
- <creator>
- %(creator)s
- </creator>
- <maintainer>
- %(maintainer)s
- </maintainer>
- <title>
- %(title)s
- </title>
- <date>
- %(date)s
- </date>
- <version identifier="%(version)s" date="%(date)s" />
- <subject category="%(category)s"/>
- <description>
- %(description)s
- </description>
- <type>
- %(type)s
- </type>
- <format mime="%(mimeformat)s" />
- <identifier url="%(location)s"/>
- <language code="%(language)s"/>
- <rights type="%(license)s" />
- </resource>
-</omf>
-
-''' % omf_vars
-
-
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-USAGE: translations-status.py BUILDSCRIPT-DIR LOCALEDIR
-
- This script must be run from Documentation/
-
- Reads template files translations.template.html.in
-and for each LANG in LANGUAGES LANG/translations.template.html.in
- Writes translations.html.in and for each LANG in LANGUAGES
-translations.LANG.html.in
- Writes out/translations-status.txt
- Updates word counts in TRANSLATION
-"""
-
-import sys
-import re
-import string
-import os
-
-import langdefs
-import buildlib
-
-def progress (str):
- sys.stderr.write (str + '\n')
-
-progress ("translations-status.py")
-
-_doc = lambda s: s
-
-# load gettext messages catalogs
-translation = langdefs.translation
-
-
-language_re = re.compile (r'^@documentlanguage (.+)', re.M)
-comments_re = re.compile (r'^@ignore\n(.|\n)*?\n@end ignore$|@c .*?$', re.M)
-space_re = re.compile (r'\s+', re.M)
-lilypond_re = re.compile (r'@lilypond({.*?}|(.|\n)*?\n@end lilypond$)', re.M)
-node_re = re.compile ('^@node .*?$', re.M)
-title_re = re.compile ('^@(top|chapter|(?:sub){0,2}section|' + \
-'(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?) (.*?)$', re.M)
-include_re = re.compile ('^@include (.*?)$', re.M)
-
-translators_re = re.compile (r'^@c\s+Translators\s*:\s*(.*?)$', re.M | re.I)
-checkers_re = re.compile (r'^@c\s+Translation\s*checkers\s*:\s*(.*?)$',
- re.M | re.I)
-status_re = re.compile (r'^@c\s+Translation\s*status\s*:\s*(.*?)$', re.M | re.I)
-post_gdp_re = re.compile ('post.GDP', re.I)
-untranslated_node_str = '@untranslated'
-skeleton_str = '-- SKELETON FILE --'
-
-section_titles_string = _doc ('Section titles')
-last_updated_string = _doc (' <p><i>Last updated %s</i></p>\n')
-detailed_status_heads = [_doc ('Translators'), _doc ('Translation checkers'),
- _doc ('Translated'), _doc ('Up to date'),
- _doc ('Other info')]
-format_table = {
- 'not translated': {'color':'d0f0f8', 'short':_doc ('no'), 'abbr':'NT',
- 'long':_doc ('not translated')},
- 'partially translated': {'color':'dfef77',
- 'short':_doc ('partially (%(p)d %%)'),
- 'abbr':'%(p)d%%',
- 'long':_doc ('partially translated (%(p)d %%)')},
- 'fully translated': {'color':'1fff1f', 'short':_doc ('yes'), 'abbr':'FT',
- 'long': _doc ('translated')},
- 'up to date': {'short':_doc ('yes'), 'long':_doc ('up to date'),
- 'abbr':'100%%', 'vague':_doc ('up to date')},
- 'outdated': {'short':_doc ('partially'), 'abbr':'%(p)d%%',
- 'vague':_doc ('partially up to date')},
- 'N/A': {'short':_doc ('N/A'), 'abbr':'N/A', 'color':'d587ff', 'vague':''},
- 'pre-GDP':_doc ('pre-GDP'),
- 'post-GDP':_doc ('post-GDP')
-}
-
-texi_level = {
-# (Unumbered/Numbered/Lettered, level)
- 'top': ('u', 0),
- 'unnumbered': ('u', 1),
- 'unnumberedsec': ('u', 2),
- 'unnumberedsubsec': ('u', 3),
- 'chapter': ('n', 1),
- 'section': ('n', 2),
- 'subsection': ('n', 3),
- 'appendix': ('l', 1)
-}
-
-appendix_number_trans = string.maketrans ('@ABCDEFGHIJKLMNOPQRSTUVWXY',
- 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
-
-class SectionNumber (object):
- def __init__ (self):
- self.__data = [[0,'u']]
-
- def __increase_last_index (self):
- type = self.__data[-1][1]
- if type == 'l':
- self.__data[-1][0] = \
- self.__data[-1][0].translate (appendix_number_trans)
- elif type == 'n':
- self.__data[-1][0] += 1
-
- def format (self):
- if self.__data[-1][1] == 'u':
- return ''
- return '.'.join ([str (i[0]) for i in self.__data if i[1] != 'u']) + ' '
-
- def increase (self, (type, level)):
- if level == 0:
- self.__data = [[0,'u']]
- while level + 1 < len (self.__data):
- del self.__data[-1]
- if level + 1 > len (self.__data):
- self.__data.append ([0, type])
- if type == 'l':
- self.__data[-1][0] = '@'
- if type == self.__data[-1][1]:
- self.__increase_last_index ()
- else:
- self.__data[-1] = ([0, type])
- if type == 'l':
- self.__data[-1][0] = 'A'
- elif type == 'n':
- self.__data[-1][0] = 1
- return self.format ()
-
-
-def percentage_color (percent):
- p = percent / 100.0
- if p < 0.33:
- c = [hex (int (3 * p * b + (1 - 3 * p) * a))[2:]
- for (a, b) in [(0xff, 0xff), (0x5c, 0xa6), (0x5c, 0x4c)]]
- elif p < 0.67:
- c = [hex (int ((3 * p - 1) * b + (2 - 3 * p) * a))[2:]
- for (a, b) in [(0xff, 0xff), (0xa6, 0xff), (0x4c, 0x3d)]]
- else:
- c = [hex (int ((3 * p - 2) * b + 3 * (1 - p) * a))[2:]
- for (a, b) in [(0xff, 0x1f), (0xff, 0xff), (0x3d, 0x1f)]]
- return ''.join (c)
-
-
-def update_word_count (text, filename, word_count):
- return re.sub (r'(?m)^(\d+) *' + filename,
- str (word_count).ljust (6) + filename,
- text)
-
-po_msgid_re = re.compile (r'^msgid "(.*?)"(?:\n"(.*?)")*', re.M)
-
-def po_word_count (po_content):
- s = ' '.join ([''.join (t) for t in po_msgid_re.findall (po_content)])
- return len (space_re.split (s))
-
-sgml_tag_re = re.compile (r'<.*?>', re.S)
-
-def sgml_word_count (sgml_doc):
- s = sgml_tag_re.sub ('', sgml_doc)
- return len (space_re.split (s))
-
-def tely_word_count (tely_doc):
- '''
- Calculate word count of a Texinfo document node by node.
-
- Take string tely_doc as an argument.
- Return a list of integers.
-
- Texinfo comments and @lilypond blocks are not included in word counts.
- '''
- tely_doc = comments_re.sub ('', tely_doc)
- tely_doc = lilypond_re.sub ('', tely_doc)
- nodes = node_re.split (tely_doc)
- return [len (space_re.split (n)) for n in nodes]
-
-
-class TelyDocument (object):
- def __init__ (self, filename):
- self.filename = filename
- self.contents = open (filename).read ()
-
- ## record title and sectionning level of first Texinfo section
- m = title_re.search (self.contents)
- if m:
- self.title = m.group (2)
- self.level = texi_level [m.group (1)]
- else:
- self.title = 'Untitled'
- self.level = ('u', 1)
-
- m = language_re.search (self.contents)
- if m:
- self.language = m.group (1)
-
- included_files = [os.path.join (os.path.dirname (filename), t)
- for t in include_re.findall (self.contents)]
- self.included_files = [p for p in included_files if os.path.exists (p)]
-
- def print_title (self, section_number):
- return section_number.increase (self.level) + self.title
-
-
-class TranslatedTelyDocument (TelyDocument):
- def __init__ (self, filename, masterdocument, parent_translation=None):
- TelyDocument.__init__ (self, filename)
-
- self.masterdocument = masterdocument
- if not hasattr (self, 'language') \
- and hasattr (parent_translation, 'language'):
- self.language = parent_translation.language
- if hasattr (self, 'language'):
- self.translation = translation[self.language]
- else:
- self.translation = lambda x: x
- self.title = self.translation (self.title)
-
- ## record authoring information
- m = translators_re.search (self.contents)
- if m:
- self.translators = [n.strip () for n in m.group (1).split (',')]
- else:
- self.translators = parent_translation.translators
- m = checkers_re.search (self.contents)
- if m:
- self.checkers = [n.strip () for n in m.group (1).split (',')]
- elif isinstance (parent_translation, TranslatedTelyDocument):
- self.checkers = parent_translation.checkers
- else:
- self.checkers = []
-
- ## check whether translation is pre- or post-GDP
- m = status_re.search (self.contents)
- if m:
- self.post_gdp = bool (post_gdp_re.search (m.group (1)))
- else:
- self.post_gdp = False
-
- ## record which parts (nodes) of the file are actually translated
- self.partially_translated = not skeleton_str in self.contents
- nodes = node_re.split (self.contents)
- self.translated_nodes = [not untranslated_node_str in n for n in nodes]
-
- ## calculate translation percentage
- master_total_word_count = sum (masterdocument.word_count)
- translation_word_count = \
- sum ([masterdocument.word_count[k] * self.translated_nodes[k]
- for k in range (min (len (masterdocument.word_count),
- len (self.translated_nodes)))])
- self.translation_percentage = \
- 100 * translation_word_count / master_total_word_count
-
- ## calculate how much the file is outdated
- (diff_string, error) = \
- buildlib.check_translated_doc (masterdocument.filename, self.filename, self.contents)
- if error:
- sys.stderr.write ('warning: %s: %s' % (self.filename, error))
- self.uptodate_percentage = None
- else:
- diff = diff_string.splitlines ()
- insertions = sum ([len (l) - 1 for l in diff
- if l.startswith ('+')
- and not l.startswith ('+++')])
- deletions = sum ([len (l) - 1 for l in diff
- if l.startswith ('-')
- and not l.startswith ('---')])
- outdateness_percentage = 50.0 * (deletions + insertions) / \
- (masterdocument.size + 0.5 * (deletions - insertions))
- self.uptodate_percentage = 100 - int (outdateness_percentage)
- if self.uptodate_percentage > 100:
- alternative = 50
- progress ("%s: strange uptodateness percentage %d %%, \
-setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
- self.uptodate_percentage = alternative
- elif self.uptodate_percentage < 1:
- alternative = 1
- progress ("%s: strange uptodateness percentage %d %%, \
-setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
- self.uptodate_percentage = alternative
-
- def completeness (self, formats=['long'], translated=False):
- if translated:
- translation = self.translation
- else:
- translation = lambda x: x
-
- if isinstance (formats, str):
- formats = [formats]
- p = self.translation_percentage
- if p == 0:
- status = 'not translated'
- elif p == 100:
- status = 'fully translated'
- else:
- status = 'partially translated'
- return dict ([(f, translation (format_table[status][f]) % locals())
- for f in formats])
-
- def uptodateness (self, formats=['long'], translated=False):
- if translated:
- translation = self.translation
- else:
- translation = lambda x: x
-
- if isinstance (formats, str):
- formats = [formats]
- p = self.uptodate_percentage
- if p == None:
- status = 'N/A'
- elif p == 100:
- status = 'up to date'
- else:
- status = 'outdated'
- l = {}
- for f in formats:
- if f == 'color' and p != None:
- l['color'] = percentage_color (p)
- else:
- l[f] = translation (format_table[status][f]) % locals ()
- return l
-
- def gdp_status (self):
- if self.post_gdp:
- return self.translation (format_table['post-GDP'])
- else:
- return self.translation (format_table['pre-GDP'])
-
- def short_html_status (self):
- s = ' <td>'
- if self.partially_translated:
- s += '<br>\n '.join (self.translators) + '<br>\n'
- if self.checkers:
- s += ' <small>' + \
- '<br>\n '.join (self.checkers) + '</small><br>\n'
-
- c = self.completeness (['color', 'long'])
- s += ' <span style="background-color: #%(color)s">\
-%(long)s</span><br>\n' % c
-
- if self.partially_translated:
- u = self.uptodateness (['vague', 'color'])
- s += ' <span style="background-color: #%(color)s">\
-%(vague)s</span><br>\n' % u
-
- s += ' </td>\n'
- return s
-
- def text_status (self):
- s = self.completeness ('abbr')['abbr'] + ' '
-
- if self.partially_translated:
- s += self.uptodateness ('abbr')['abbr'] + ' '
- return s
-
- def html_status (self, numbering=SectionNumber ()):
- if self.title == 'Untitled':
- return ''
-
- if self.level[1] == 0: # if self is a master document
- s = '''<table align="center" border="2">
- <tr align="center">
- <th>%s</th>''' % self.print_title (numbering)
- s += ''.join ([' <th>%s</th>\n' % self.translation (h)
- for h in detailed_status_heads])
- s += ' </tr>\n'
- s += ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
- % (self.translation (section_titles_string),
- sum (self.masterdocument.word_count))
-
- else:
- s = ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
- % (self.print_title (numbering),
- sum (self.masterdocument.word_count))
-
- if self.partially_translated:
- s += ' <td>' + '<br>\n '.join (self.translators) + '</td>\n'
- s += ' <td>' + '<br>\n '.join (self.checkers) + '</td>\n'
- else:
- s += ' <td></td>\n' * 2
-
- c = self.completeness (['color', 'short'], translated=True)
- s += ' <td><span style="background-color: #%(color)s">\
-%(short)s</span></td>\n' % {'color': c['color'],
- 'short': c['short']}
-
- if self.partially_translated:
- u = self.uptodateness (['short', 'color'], translated=True)
- s += ' <td><span style="background-color: #%(color)s">\
-%(short)s</span></td>\n' % {'color': u['color'],
- 'short': u['short']}
- else:
- s += ' <td></td>\n'
-
- s += ' <td>' + self.gdp_status () + '</td>\n </tr>\n'
- s += ''.join ([i.translations[self.language].html_status (numbering)
- for i in self.masterdocument.includes
- if self.language in i.translations])
-
- if self.level[1] == 0: # if self is a master document
- s += '</table>\n<p></p>\n'
- return s
-
-class MasterTelyDocument (TelyDocument):
- def __init__ (self,
- filename,
- parent_translations=dict ([(lang, None)
- for lang in langdefs.LANGDICT])):
- TelyDocument.__init__ (self, filename)
- self.size = len (self.contents)
- self.word_count = tely_word_count (self.contents)
- translations = dict ([(lang, os.path.join (lang, filename))
- for lang in langdefs.LANGDICT])
- self.translations = \
- dict ([(lang,
- TranslatedTelyDocument (translations[lang],
- self, parent_translations.get (lang)))
- for lang in langdefs.LANGDICT
- if os.path.exists (translations[lang])])
- if self.translations:
- self.includes = [MasterTelyDocument (f, self.translations)
- for f in self.included_files]
- else:
- self.includes = []
-
- def update_word_counts (self, s):
- s = update_word_count (s, self.filename, sum (self.word_count))
- for i in self.includes:
- s = i.update_word_counts (s)
- return s
-
- def html_status (self, numbering=SectionNumber ()):
- if self.title == 'Untitled' or not self.translations:
- return ''
- if self.level[1] == 0: # if self is a master document
- s = '''<table align="center" border="2">
- <tr align="center">
- <th>%s</th>''' % self.print_title (numbering)
- s += ''.join ([' <th>%s</th>\n' % l for l in self.translations])
- s += ' </tr>\n'
- s += ' <tr align="left">\n <td>Section titles<br>(%d)</td>\n' \
- % sum (self.word_count)
-
- else: # if self is an included file
- s = ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
- % (self.print_title (numbering), sum (self.word_count))
-
- s += ''.join ([t.short_html_status ()
- for t in self.translations.values ()])
- s += ' </tr>\n'
- s += ''.join ([i.html_status (numbering) for i in self.includes])
-
- if self.level[1] == 0: # if self is a master document
- s += '</table>\n<p></p>\n'
- return s
-
- def text_status (self, numbering=SectionNumber (), colspec=[48,12]):
- if self.title == 'Untitled' or not self.translations:
- return ''
-
- s = ''
- if self.level[1] == 0: # if self is a master document
- s += (self.print_title (numbering) + ' ').ljust (colspec[0])
- s += ''.join (['%s'.ljust (colspec[1]) % l
- for l in self.translations])
- s += '\n'
- s += ('Section titles (%d)' % \
- sum (self.word_count)).ljust (colspec[0])
-
- else:
- s = '%s (%d) ' \
- % (self.print_title (numbering), sum (self.word_count))
- s = s.ljust (colspec[0])
-
- s += ''.join ([t.text_status ().ljust(colspec[1])
- for t in self.translations.values ()])
- s += '\n\n'
- s += ''.join ([i.text_status (numbering) for i in self.includes])
-
- if self.level[1] == 0:
- s += '\n'
- return s
-
-
-update_category_word_counts_re = re.compile (r'(?ms)^-(\d+)-(.*?\n)\d+ *total')
-
-counts_re = re.compile (r'(?m)^(\d+) ')
-
-def update_category_word_counts_sub (m):
- return '-' + m.group (1) + '-' + m.group (2) + \
- str (sum ([int (c)
- for c in counts_re.findall (m.group (2))])).ljust (6) + \
- 'total'
-
-
-progress ("Reading documents...")
-
-tely_files = \
- buildlib.read_pipe ("find -maxdepth 2 -name '*.tely'")[0].splitlines ()
-tely_files.sort ()
-master_docs = [MasterTelyDocument (os.path.normpath (filename))
- for filename in tely_files]
-master_docs = [doc for doc in master_docs if doc.translations]
-
-main_status_page = open ('translations.template.html.in').read ()
-
-enabled_languages = [l for l in langdefs.LANGDICT
- if langdefs.LANGDICT[l].enabled
- and l != 'en']
-lang_status_pages = \
- dict ([(l, open (os.path.join (l, 'translations.template.html.in')). read ())
- for l in enabled_languages])
-
-progress ("Generating status pages...")
-
-date_time = buildlib.read_pipe ('LANG= date -u')[0]
-
-main_status_html = last_updated_string % date_time
-main_status_html += '\n'.join ([doc.html_status () for doc in master_docs])
-
-html_re = re.compile ('<html>', re.I)
-end_body_re = re.compile ('</body>', re.I)
-
-html_header = '''<html>
-<!-- This page is automatically generated by translation-status.py from
-translations.template.html.in; DO NOT EDIT !-->'''
-
-main_status_page = html_re.sub (html_header, main_status_page)
-
-main_status_page = end_body_re.sub (main_status_html + '\n</body>',
- main_status_page)
-
-open ('translations.html.in', 'w').write (main_status_page)
-
-for l in enabled_languages:
- date_time = buildlib.read_pipe ('LANG=%s date -u' % l)[0]
- lang_status_pages[l] = translation[l] (last_updated_string) % date_time + lang_status_pages[l]
- lang_status_page = html_re.sub (html_header, lang_status_pages[l])
- html_status = '\n'.join ([doc.translations[l].html_status ()
- for doc in master_docs
- if l in doc.translations])
- lang_status_page = end_body_re.sub (html_status + '\n</body>',
- lang_status_page)
- open (os.path.join (l, 'translations.html.in'), 'w').write (lang_status_page)
-
-main_status_txt = '''Documentation translations status
-Generated %s
-NT = not translated
-FT = fully translated
-
-''' % date_time
-
-main_status_txt += '\n'.join ([doc.text_status () for doc in master_docs])
-
-status_txt_file = 'out/translations-status.txt'
-progress ("Writing %s..." % status_txt_file)
-open (status_txt_file, 'w').write (main_status_txt)
-
-translation_instructions_file = 'TRANSLATION'
-progress ("Updating %s..." % translation_instructions_file)
-translation_instructions = open (translation_instructions_file).read ()
-
-for doc in master_docs:
- translation_instructions = doc.update_word_counts (translation_instructions)
-
-for html_file in re.findall (r'(?m)^\d+ *(\S+?\.html\S*?)(?: |$)',
- translation_instructions):
- word_count = sgml_word_count (open (html_file).read ())
- translation_instructions = update_word_count (translation_instructions,
- html_file,
- word_count)
-
-for po_file in re.findall (r'(?m)^\d+ *(\S+?\.po\S*?)(?: |$)',
- translation_instructions):
- word_count = po_word_count (open (po_file).read ())
- translation_instructions = update_word_count (translation_instructions,
- po_file,
- word_count)
-
-translation_instructions = \
- update_category_word_counts_re.sub (update_category_word_counts_sub,
- translation_instructions)
-
-open (translation_instructions_file, 'w').write (translation_instructions)
+++ /dev/null
-#!@PYTHON@
-# update-snippets.py
-
-# USAGE: update-snippets.py REFERENCE-DIR TARGET-DIR FILES
-#
-# update ly snippets in TARGET-DIR/FILES with snippets from REFERENCE-DIR/FILES
-#
-# More precisely, each existing FILE in TARGET-DIR is matched to the FILE in
-# REFERENCE-DIR (it the latter does not exist, a warning is given).
-#
-# Shell wildcards expansion is performed on FILES.
-# This script currently supports Texinfo format.
-# Ly snippets preceded with a line containing '@c KEEP LY' in TARGET-DIR/FILES
-# will not be updated.
-# An error occurs if REFERENCE-DIR/FILE and TARGET-DIR/FILE do not have the
-# same snippets count.
-
-import sys
-import os
-import glob
-import re
-
-print "update-snippets.py"
-
-comment_re = re.compile (r'(?<!@)(@c(?:omment)? .*?\n|^@ignore\n.*?\n@end ignore\n)', re.M | re.S)
-snippet_re = re.compile (r'^(@lilypond(?:file)?(?:\[.*?\])?\s*\{.+?\}|@lilypond(?:\[.*?\])?(?:.|\n)+?@end lilypond)', re.M)
-
-
-def snippet_split (l):
- r = []
- for s in [s for s in l if s]:
- if s.startswith ('@c ') or s.startswith ('@ignore\n') or s.startswith ('@comment '):
- r.append(s)
- else:
- r += [t for t in snippet_re.split (s) if t]
- return r
-
-def count_snippet (l):
- k = 0
- for s in l:
- if s.startswith ('@lilypond'):
- k += 1
- return k
-
-def find_next_snippet (l, k):
- while not l[k].startswith ('@lilypond'):
- k += 1
- return k
-
-exit_code = 0
-
-def update_exit_code (code):
- global exit_code
- exit_code = max (code, exit_code)
-
-ref_dir, target_dir = sys.argv [1:3]
-file_patterns = sys.argv[3:]
-
-total_snippet_count = 0
-changed_snippets_count = 0
-
-for pattern in file_patterns:
- files = glob.glob (os.path.join (target_dir, pattern))
- for file in files:
- ref_file = os.path.join (ref_dir, os.path.basename (file))
- if not os.path.isfile (ref_file):
- sys.stderr.write ("Warning: %s: no such file.\nReference file for %s not found.\n" % (ref_file, file))
- continue
- f = open (file, 'r')
- target_source = comment_re.split (f.read ())
- f.close ()
- if reduce (lambda x, y: x or y, ['-- SKELETON FILE --' in s for s in target_source]):
- sys.stderr.write ("Skipping skeleton file %s\n" % file)
- continue
- g = open (ref_file, 'r')
- ref_source = comment_re.split (g.read ())
- target_source = snippet_split (target_source)
- ref_source = snippet_split (ref_source)
- if '' in target_source or '' in ref_source:
- raise "AAAAARGH: unuseful empty string"
- snippet_count = count_snippet (target_source)
- if not snippet_count == count_snippet (ref_source):
- update_exit_code (1)
- sys.stderr.write ("Error: %s and %s have different snippet counts.\n\
-Update translation by at least adding a @lilypond block where necessary, then rerun this script.\n" % (ref_file, file))
- continue
- total_snippet_count += snippet_count
- c = 0
- k = -1
- for j in range (len (target_source)):
- if target_source[j].startswith ('@lilypond'):
- k = find_next_snippet (ref_source, k+1)
- if j > 0 and (not target_source[j-1].startswith ('@c KEEP LY')) and target_source[j] != ref_source[k]:
- target_source[j] = ref_source[k]
- c += 1
- changed_snippets_count += 1
- f = open (file, 'w')
- f.write (''.join (target_source))
- sys.stderr.write ('%s: %d/%d snippets updated\n' % (file, c, snippet_count))
-
-sys.stderr.write ('\nTotal: %d snippets, %d updated snippets.\n' % (total_snippet_count, changed_snippets_count))
-sys.exit (exit_code)
+++ /dev/null
-#!@PYTHON@
-
-## This is www_post.py. This script is the main stage
-## of toplevel GNUmakefile local-WWW-post target.
-
-# USAGE: www_post PACKAGE_NAME TOPLEVEL_VERSION OUTDIR TARGETS
-# please call me from top of the source directory
-
-import sys
-import os
-import re
-
-import langdefs
-
-import mirrortree
-import postprocess_html
-
-package_name, package_version, outdir, targets = sys.argv[1:]
-targets = targets.split (' ')
-outdir = os.path.normpath (outdir)
-doc_dirs = ['input', 'Documentation', outdir]
-target_pattern = os.path.join (outdir, '%s-root')
-
-# these redirection pages allow to go back to the documentation index
-# from HTML manuals/snippets page
-static_files = {
- os.path.join (outdir, 'index.html'):
- '''<META HTTP-EQUIV="refresh" content="0;URL=Documentation/index.html">
-<html><body>Redirecting to the documentation index...</body></html>\n''',
- os.path.join (outdir, 'VERSION'):
- package_version + '\n',
- os.path.join ('input', 'lsr', outdir, 'index.html'):
- '''<META HTTP-EQUIV="refresh" content="0;URL=../../index.html">
-<html><body>Redirecting to the documentation index...</body></html>\n'''
- }
-
-for l in langdefs.LANGUAGES:
- static_files[os.path.join ('Documentation', 'user', outdir, l.file_name ('index', '.html'))] = \
- '<META HTTP-EQUIV="refresh" content="0;URL=../' + l.file_name ('index', '.html') + \
- '">\n<html><body>Redirecting to the documentation index...</body></html>\n'
-
-for f, contents in static_files.items ():
- open (f, 'w').write (contents)
-
-sys.stderr.write ("Mirrorring...\n")
-dirs, symlinks, files = mirrortree.walk_tree (
- tree_roots = doc_dirs,
- process_dirs = outdir,
- exclude_dirs = '(^|/)(' + r'|po|out|out-test|.*?[.]t2d|\w*?-root)(/|$)|Documentation/(' + '|'.join ([l.code for l in langdefs.LANGUAGES]) + ')',
- find_files = r'.*?\.(?:midi|html|pdf|png|txt|i?ly|signature|css|zip|xml|mxl)$|VERSION',
- exclude_files = r'lily-[0-9a-f]+.*\.(pdf|txt)')
-
-# actual mirrorring stuff
-html_files = []
-hardlinked_files = []
-for f in files:
- if f.endswith ('.html'):
- html_files.append (f)
- else:
- hardlinked_files.append (f)
-dirs = [re.sub ('/' + outdir, '', d) for d in dirs]
-while outdir in dirs:
- dirs.remove (outdir)
-dirs = list (set (dirs))
-dirs.sort ()
-
-strip_file_name = {}
-strip_re = re.compile (outdir + '/')
-for t in targets:
- out_root = target_pattern % t
- strip_file_name[t] = lambda s: os.path.join (target_pattern % t, (strip_re.sub ('', s)))
- os.mkdir (out_root)
- map (os.mkdir, [os.path.join (out_root, d) for d in dirs])
- for f in hardlinked_files:
- os.link (f, strip_file_name[t] (f))
- for l in symlinks:
- p = mirrortree.new_link_path (os.path.normpath (os.readlink (l)), os.path.dirname (l), strip_re)
- dest = strip_file_name[t] (l)
- if not os.path.exists (dest):
- os.symlink (p, dest)
-
- ## ad-hoc renaming to make xrefs between PDFs work
- os.rename (os.path.join (out_root, 'input/lsr/lilypond-snippets.pdf'),
- os.path.join (out_root, 'Documentation/user/lilypond-snippets.pdf'))
-
-# need this for content negotiation with documentation index
-if 'online' in targets:
- f = open (os.path.join (target_pattern % 'online', 'Documentation/.htaccess'), 'w')
- f.write ('#.htaccess\nDirectoryIndex index\n')
- f.close ()
-
-postprocess_html.build_pages_dict (html_files)
-for t in targets:
- sys.stderr.write ("Processing HTML pages for %s target...\n" % t)
- postprocess_html.process_html_files (
- package_name = package_name,
- package_version = package_version,
- target = t,
- name_filter = strip_file_name[t])
-
NCSB_FILE=`$FCMATCH --verbose "Century Schoolbook L:style=$style" | grep 'file:' | grep -v "\.ttf"`
NCSB_FILE=`echo $NCSB_FILE | sed 's/^.*"\(.*\)".*$/\1/g'`
- NCSB_FILE=`$PYTHON "$srcdir/buildscripts/readlink.py" $NCSB_FILE`
+ NCSB_FILE=`$PYTHON "$srcdir/scripts/aux/readlink.py" $NCSB_FILE`
NCSB_SOURCE_FILES="$NCSB_FILE $NCSB_SOURCE_FILES"
done
else
LILYPOND_WORDS = $(outdir)/lilypond-words.el
LILYPOND_WORDS_DEPENDS =\
$(top-src-dir)/lily/lily-lexer.cc \
- $(buildscript-dir)/lilypond-words.py \
+ $(buildscript-dir)/lilypond-words \
$(top-src-dir)/scm/markup.scm \
$(top-src-dir)/ly/engraver-init.ly
+$(buildscript-dir)/lilypond-words:
+ make -C $(depth)/scripts/build
+
$(LILYPOND_WORDS):
- cd $(top-src-dir) && $(PYTHON) buildscripts/lilypond-words.py --el --dir=$(top-build-dir)/elisp/$(outconfbase)
+ cd $(top-src-dir) && $(buildscript-dir)/lilypond-words --el --dir=$(top-build-dir)/elisp/$(outconfbase)
all: $(LILYPOND_WORDS)
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'install', 'src_glob')
-sources = src_glob ('*.el') + ['lilypond-words.el']
-
-e = env.Copy ()
-a = '$PYTHON $srcdir/buildscripts/lilypond-words.py --el --dir=${TARGET.dir}'
-e.Command ('lilypond-words.el',
- ['#/lily/lily-lexer.cc',
- '#/buildscripts/lilypond-words.py',
- '#/scm/markup.scm',
- '#/ly/engraver-init.ly',],
- a)
-
-install (sources, env['sharedir_package_version'] + '/elisp')
+++ /dev/null
-# -*-python-*-
-
-name = 'flower'
-outdir = Dir ('.').path
-
-Import ('env', 'src_glob')
-sources = src_glob ('*.cc')
-
-e = env.Copy ()
-e.Append (CPPPATH = ['#/flower/include', outdir,])
-includes = src_glob ('include/*.hh')
-
-if 1: # ut
- def test_source (x):
- x.startswith ('test')
- test_sources = filter (lambda x: x.startswith ('test'), sources)
- sources = filter (lambda x: not x.startswith ('test'), sources)
- ee = e.Copy ()
- ee.Append (LIBS = [name, 'boost_unit_test_framework'])
- test = ee.Program ('test' + name, test_sources)
-
-if env['static']:
- e.Library (name, sources)
-if not env['static'] or env['shared']:
- e.SharedLibrary (name, sources)
-
-po = env.Command ('lilypond.po', sources + includes, env['pocommand'])
-env.Alias ('po-update', po)
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'mutopia')
-mutopia ()
GENERATED_ITELY_FILES = $(IN_ITELY_FILES:%-intro.itely=$(outdir)/%.itely)
$(outdir)/%.itely: %-intro.itely %.snippet-list
- xargs $(PYTHON) $(buildscript-dir)/lys-to-tely.py -f doctitle,texidoc,verbatim --name=$@ --template=$< < $(filter %.snippet-list, $^)
+ xargs $(LYS_TO_TELY) -f doctitle,texidoc,verbatim --name=$@ --template=$< < $(filter %.snippet-list, $^)
$(outdir)/lilypond-snippets.texi: $(GENERATED_ITELY_FILES) $(LY_FILES)
To update this directory, do at top of the source tree
-buildscripts/makelsr.py DIR
+scripts/aux/makelsr.py DIR
where DIR is the directory unpacked from lsr-snippets-doc-DATE tarball
available on http://lsr.dsi.unimi.it/download.
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'collate')
-collate (title = 'LilyPond Examples')
"
doctitle = "Avoiding collisions of chord fingering with beams"
+ texidocfr = "
+Les doigtés et les numéros de cordes attachés à des notes seules
+évitent automatiquement les barres de ligature, mais ce n'est pas le
+cas par défaut pour les doigtés ou numéros de cordes attachés aux
+notes d'un accord. L'exemple qui suit montre comment ce comportement
+par défaut peut être corrigé.
+"
+ doctitlefr = "Éviter les collisions entre les doigtés d'accords et les ligatures"
} % begin verbatim
\relative c' {
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'collate')
-collate (title = 'LilyPond Examples from the Manual')
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'mutopia')
-mutopia ()
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'mutopia')
-mutopia ()
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'mutopia')
-mutopia ()
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'mutopia')
-mutopia ()
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'mutopia')
-mutopia ()
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'mutopia')
-mutopia ()
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'collate')
-collate (title = 'Advanced snippets')
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'collate')
-collate (title = 'LilyPond Regression Tests')
"
doctitlees = "Evitar colisiones entre digitaciones de acordes y barras de corchea"
+
+%% Translation of GIT committish: 98dc713cb34b498f145badf23d14957367a19ece
+ texidocfr = "
+Les doigtés et les numéros de cordes attachés à des notes seules
+évitent automatiquement les barres de ligature, mais ce n'est pas le cas par
+défaut pour les doigtés ou numéros de cordes attachés aux notes d'un
+accord. L'exemple qui suit montre comment ce comportement par défaut
+peut être corrigé.
+"
+ doctitlefr = "Éviter les collisions entre les doigtés d'accords et les ligatures"
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'src_glob', 'install')
-
-outdir = Dir ('.').abspath
-
-cc_sources = src_glob ('*.cc')
-sources = cc_sources + ['parser.yy', 'lexer.ll']
-includes = src_glob ('include/*.hh')
-
-e = env.Copy ()
-
-e.Append (
- CPPPATH = [
- '#/lily/include',
- '#/flower/include',
- outdir],
- LEXFLAGS = ['-Cfe', '-p', '-p'],
- LIBS = ['flower'],
- )
-
-e.HH ('parser.hh', 'parser.yy')
-e.ParseConfig ('guile-config link')
-lily = e.Program ('lilypond', sources)
-install (lily, env['bindir'])
-
-# let's not, for now
-#po = env.Command ('lilypond.po', cc_sources + includes, env['pocommand'])
-#env.Alias ('po-update', po)
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'install', 'src_glob')
-sources = src_glob ('*.ly')
-install (sources, env['sharedir_package_version'] + '/ly')
default:
local-WWW-2: $(OUT_HTML_FILES)
- $(PYTHON) $(buildscript-dir)/mass-link.py --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/$(outdir) $(HTML_FILES)
+ $(buildscript-dir)/mass-link --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/$(outdir) $(HTML_FILES)
endif
$(outdir)/%.pdftexi: $(outdir)/%.texi
- $(PYTHON) $(buildscript-dir)/texi-gettext.py $(ISOLANG) $<
+ $(buildscript-dir)/texi-gettext $(ISOLANG) $<
$(outdir)/%.pdf: $(outdir)/%.pdftexi
cd $(outdir); texi2pdf $(TEXI2PDF_FLAGS) $(TEXINFO_PAPERSIZE_OPTION) $(notdir $*).pdftexi
ln -f $< $@
$(XREF_MAPS_DIR)/%.$(ISOLANG).xref-map: $(outdir)/%.texi
- $(PYTHON) $(buildscript-dir)/extract_texi_filenames.py -o $(XREF_MAPS_DIR) $<
+ $(buildscript-dir)/extract_texi_filenames -o $(XREF_MAPS_DIR) $<
$(MASTER_TEXI_FILES): $(ITELY_FILES) $(ITEXI_FILES)
local-WWW-1: $(MASTER_TEXI_FILES) $(PDF_FILES) $(XREF_MAPS_FILES)
local-WWW-2: $(DEEP_HTML_FILES) $(BIG_PAGE_HTML_FILES) $(DOCUMENTATION_LOCALE_TARGET)
- find $(outdir) -name '*.html' | xargs grep -L 'UNTRANSLATED NODE: IGNORE ME' | xargs $(PYTHON) $(buildscript-dir)/html-gettext.py $(ISOLANG)
- find $(outdir) -name '*.html' | xargs grep -L --label="" 'UNTRANSLATED NODE: IGNORE ME' | sed 's!$(outdir)/!!g' | xargs $(PYTHON) $(buildscript-dir)/mass-link.py --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir) $(TELY_FILES:%.tely=%.pdf)
- find $(outdir) \( -name 'lily-*.png' -o -name 'lily-*.ly' \) | sed 's!$(outdir)/!!g' | xargs $(PYTHON) $(buildscript-dir)/mass-link.py hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir)
+ find $(outdir) -name '*.html' | xargs grep -L 'UNTRANSLATED NODE: IGNORE ME' | xargs $(buildscript-dir)/html-gettext $(ISOLANG)
+ find $(outdir) -name '*.html' | xargs grep -L --label="" 'UNTRANSLATED NODE: IGNORE ME' | sed 's!$(outdir)/!!g' | xargs $(buildscript-dir)/mass-link --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir) $(TELY_FILES:%.tely=%.pdf)
+ find $(outdir) \( -name 'lily-*.png' -o -name 'lily-*.ly' \) | sed 's!$(outdir)/!!g' | xargs $(buildscript-dir)/mass-link hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir)
$(DOCUMENTATION_LOCALE_TARGET):
$(MAKE) -C $(depth)/Documentation/po out=www messages
# you do make dist
#
-buildscript-dir = $(src-depth)/buildscripts
+buildscript-dir = $(top-build-dir)/scripts/build/$(outconfbase)
+auxpython-dir = $(src-depth)/python/aux
+auxscript-dir = $(src-depth)/scripts/aux
script-dir = $(src-depth)/scripts
input-dir = $(src-depth)/input
make-dir = $(src-depth)/make
include-flower = $(src-depth)/flower/include
-export PYTHONPATH:=$(buildscript-dir):$(PYTHONPATH)
+export PYTHONPATH:=$(auxpython-dir):$(PYTHONPATH)
LILYPOND_INCLUDES = $(include-flower) $(depth)/flower/$(outdir)
ifeq ($(LILYPOND_EXTERNAL_BINARY),)
# environment settings.
-export PATH:=$(top-build-dir)/lily/$(outconfbase):$(top-build-dir)/buildscripts/$(outconfbase):$(top-build-dir)/scripts/$(outconfbase):$(PATH):
+export PATH:=$(top-build-dir)/lily/$(outconfbase):$(buildscript-dir):$(top-build-dir)/scripts/$(outconfbase):$(PATH):
export LILYPOND_BINARY=$(top-build-dir)/$(outconfbase)/bin/lilypond
else
#texi-html for www only:
LILYPOND_BOOK_FORMAT=$(if $(subst out-www,,$(notdir $(outdir))),texi,texi-html)
LY2DVI = $(LILYPOND_BINARY)
-LYS_TO_TELY = $(buildscript-dir)/lys-to-tely.py
+LYS_TO_TELY = $(buildscript-dir)/lys-to-tely
$(outdir)/collated-files.tely: $(COLLATED_FILES)
- $(PYTHON) $(LYS_TO_TELY) --name=$(outdir)/collated-files.tely --title="$(TITLE)" $^
+ $(LYS_TO_TELY) --name=$(outdir)/collated-files.tely --title="$(TITLE)" $^
$(outdir)/aybabtu.subfonts:
echo $(subst .mf,,$(call src-wildcard,feta-braces-[a-z].mf)) > $@
-$(PE_SCRIPTS): $(top-build-dir)/buildscripts/$(outdir)/gen-emmentaler-scripts
- $(PYTHON) $< --dir=$(outdir)
+$(PE_SCRIPTS): $(buildscript-dir)/gen-emmentaler-scripts
+ $< --dir=$(outdir)
ALL_FONTS = $(FETA_FONTS)
PFB_FILES = $(ALL_FONTS:%=$(outdir)/%.pfb)
$(outdir)/%.otf-gtable \
$(outdir)/%.enc \
$(outdir)/%.pe: $(outdir)/%.log
- $(PYTHON) $(buildscript-dir)/mf-to-table.py \
- --global-lisp=$(outdir)/$(<F:.log=.otf-gtable) \
- --lisp=$(outdir)/$(<F:.log=.lisp) \
- --outdir=$(outdir) \
- --enc $(outdir)/$(<F:.log=.enc) \
- $<
+ $(buildscript-dir)/mf-to-table \
+ --global-lisp=$(outdir)/$(<F:.log=.otf-gtable) \
+ --lisp=$(outdir)/$(<F:.log=.lisp) \
+ --outdir=$(outdir) \
+ --enc $(outdir)/$(<F:.log=.enc) \
+ $<
local-clean:
rm -f mfplain.mem mfplain.log
echo '<fontconfig><dir>'$(shell cd $(outdir); pwd)'</dir></fontconfig>' > $@
$(NCSB_OTFS): $(NCSB_SOURCE_FILES) \
- $(buildscript-dir)/pfx2ttf.fontforge
+ $(auxscript-dir)/pfx2ttf.fontforge
$(foreach i, $(basename $(NCSB_SOURCE_FILES)), \
- $(FONTFORGE) -script $(buildscript-dir)/pfx2ttf.fontforge \
+ $(FONTFORGE) -script $(auxscript-dir)/pfx2ttf.fontforge \
$(i).pfb $(i).afm $(outdir)/ && ) true
# eof
+++ /dev/null
-# -*-python-*-
-
-import os
-import re
-import string
-
-Import ('env', 'base_glob', 'install')
-feta = reduce (lambda x, y: x + y,
- map (lambda x: base_glob (x),
- ('feta[0-9]*.mf',
- 'feta-alphabet*[0-9].mf',
- 'feta-braces-[a-z]*.mf',
- 'parmesan[0-9]*.mf',)))
-feta = base_glob ('feta[0-9][0-9]*.mf')
-feta_alphabet = base_glob ('feta-alphabet[0-9][0-9]*.mf')
-feta_braces = base_glob ('feta-braces-[a-z].mf')
-parmesan = base_glob ('parmesan[0-9][0-9]*.mf')
-
-fonts = feta + feta_alphabet + feta_braces + parmesan
-
-feta_sizes = map (lambda x: re.sub ('feta([0-9]+)', '\\1', x), feta)
-otfs = map (lambda x: 'emmentaler-' + x, feta_sizes) + ['aybabtu']
-
-t = map (env.TFM, fonts)
-g = map (env.GTABLE, fonts)
-p = map (env.PFA, fonts)
-e = map (lambda x: x + '.enc', fonts)
-s = map (lambda x: x + '.svg', fonts)
-o = map (env.OTF, otfs)
-
-# Emmentaler
-a = '''cat ${SOURCE} \
-$$(echo ${SOURCE} | grep -v brace | sed s/feta/parmesan/) \
-$$(echo ${SOURCE} | grep -v brace | sed s/feta/feta-alphabet/) \
-> ${TARGET}'''
-otf_table = Builder (action = a, suffix = '.otf-table',
- # barf
- src_suffix = '.lisp')
-env.Append (BUILDERS = {'OTF_TABLE': otf_table})
-f = map (env.OTF_TABLE, feta)
-g = map (env.OTF_TABLE, feta_braces)
-
-map (lambda x: env.Depends ('feta' + x + '.otf-table',
- ['parmesan' + x + '.lisp',
- 'feta-alphabet' + x + '.lisp']), feta_sizes)
-
-map (lambda x: env.Depends ('emmentaler-' + x + '.otf',
- 'feta' + x + '.otf-table'),
- feta_sizes)
-
-map (lambda x: env.Depends ('emmentaler-' + x + '.otf',
- ['feta' + x + '.pfa',
- 'parmesan' + x + '.pfa',
- 'feta-alphabet' + x + '.pfa']), feta_sizes)
-
-for i in feta_sizes:
- env.Command ('emmentaler-%(i)s.pe' % locals (),
- '$srcdir/buildscripts/gen-emmentaler-scripts.py',
- '$PYTHON $srcdir/buildscripts/gen-emmentaler-scripts.py --dir=${TARGET.dir}')
-
-map (lambda x: env.Depends (x + '.pfa', x + '.enc'), feta)
-
-
-# Aybabtu
-
-feta_braces_pfa = map (lambda x: x + '.pfa', feta_braces)
-
-env.AT_COPY ('aybabtu.pe.in')
-env.Command ('aybabtu.fontname', '', 'echo -n aybabtu > $TARGET')
-env.Command ('aybabtu.subfonts',
- map (lambda x: x + '.mf', feta_braces),
- 'echo ${SOURCES.filebase} > $TARGET')
-
-env.Command ('aybabtu.otf-table',
- map (lambda x: x + '.otf-table', feta_braces),
- 'cd ${TARGET.dir} && cat ${SOURCES.file} > ${TARGET.file}')
-
-env.Command ('aybabtu.otf-gtable',
- map (lambda x: x + '.otf-gtable', feta_braces),
- 'echo "(design_size . 20)" > $TARGET')
-
-env.Depends ('aybabtu.otf',
- feta_braces_pfa
- + ['aybabtu.subfonts',
- 'aybabtu.fontname',
- 'aybabtu.otf-table',
- 'aybabtu.otf-gtable'])
-
-## FIXME: building only a few fonts does not seem to work anymore.
-## what is essential these days, aybabtu/emmentaler are needed always?
-mf_essential = ['feta16', 'feta20', 'parmesan16', ]
-pfa_essential = map (env.PFA, mf_essential) + ['emmentaler-20.otf']
-env.Alias ('mf-essential', pfa_essential)
-env.Alias ('mf-essential', 'fonts.cache-1')
-
-env['fonts'] = string.join (fonts)
-env['feta_sizes'] = string.join (feta_sizes)
-
-env.Alias ('mf', pfa_essential + p + map (lambda x: x[0], o))
-env.Alias ('mf', s)
-env.Alias ('mf', 'fonts.cache-1')
-
-install (t, env['sharedir_package_version'] + '/fonts/tfm')
-install (p, env['sharedir_package_version'] + '/fonts/type1')
-install (o, env['sharedir_package_version'] + '/fonts/otf')
-install (e, env['sharedir_package_version'] + '/ps')
-install (s, env['sharedir_package_version'] + '/fonts/svg')
-
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'base_glob', 'install')
-pos = base_glob ('*.po')
-mos = map (env.MO, pos)
-
-install (mos, env['localedir'])
-
-env.Depends ('po', 'po-update')
-# map (lambda x: env.Depends (x + '.mo', x + '.pom'), pos)
-poms = map (env.POMERGE, pos)
-env.Alias ('po-update', poms)
-env.Alias ('po', mos)
-#env.Alias ('all', mos)
-
the file may accumulate the list of obsolete translations, which may
help to translate some changed entries and may be safely dropped out.
-* because I never install LilyPond, I (check-out buildscripts/set-lily.sh)
- made these links:
+* because I never install LilyPond, I made these links:
ln -s $LILYPOND_SOURCEDIR/po/out/nl.mo
$PREFIX/usr/share/locale/nl/LC_MESSAGES/lilypond.mo
+++ /dev/null
-# -*-python-*-
-
-Import ('env')
-
-dir = env['DESTDIR'] + env['sharedir_package_version'] + '/ps'
-env.Install (dir, ['lilyponddefs.ps',])
-env.Alias ('install', dir)
-
-dir = env['DESTDIR'] + env['sharedir_package_version'] + '/tex'
-env.Install (dir, ['music-drawing-routines.ps',])
-env.Alias ('install', dir)
depth = ..
+SUBDIRS=aux
+
STEPMAKE_TEMPLATES=c python-module install-out po
include $(depth)/make/stepmake.make
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'install', 'src_glob')
-py = src_glob ('*.py')
-c = src_glob ('*.c')
-
-cm = map (env.SharedObject, c)
-
-py
-cm
-
-install (py, env['sharedir_package_version'] + '/python')
-install (cm, env['libdir_package_version'] + '/python')
--- /dev/null
+depth=../..
+
+EXTRA_DIST_FILES = $(call src-wildcard,*.py)
+
+include $(depth)/make/stepmake.make
+
+default:
+
+local-clean:
+ rm -f *.pyc
--- /dev/null
+#!@PYTHON@
+
+import subprocess
+import re
+import sys
+
+verbose = False
+
+def read_pipe (command):
+ child = subprocess.Popen (command,
+ stdout = subprocess.PIPE,
+ stderr = subprocess.PIPE,
+ shell = True)
+ (output, error) = child.communicate ()
+ code = str (child.wait ())
+ if not child.stdout or child.stdout.close ():
+ print "pipe failed: %(command)s" % locals ()
+ if code != '0':
+ error = code + ' ' + error
+ return (output, error)
+
+revision_re = re.compile ('GIT [Cc]ommittish:\s+([a-f0-9]+)')
+vc_diff_cmd = 'git diff %(color_flag)s %(revision)s HEAD -- %(original)s | cat'
+
+def check_translated_doc (original, translated_file, translated_contents, color=False):
+ m = revision_re.search (translated_contents)
+ if not m:
+ sys.stderr.write ('error: ' + translated_file + \
+ ": no 'GIT committish: <hash>' found.\nPlease check " + \
+ 'the whole file against the original in English, then ' + \
+ 'fill in HEAD committish in the header.\n')
+ sys.exit (1)
+ revision = m.group (1)
+
+ if color:
+ color_flag = '--color'
+ else:
+ color_flag = '--no-color'
+ c = vc_diff_cmd % vars ()
+ if verbose:
+ sys.stderr.write ('running: ' + c)
+ return read_pipe (c)
--- /dev/null
+#!/usr/bin/python
+
+# This module is imported by check_texi_refs.py
+
+references_dict = {
+ 'lilypond': 'ruser',
+ 'lilypond-learning': 'rlearning',
+ 'lilypond-program': 'rprogram',
+ 'lilypond-snippets': 'rlsr',
+ 'music-glossary': 'rglos',
+ 'lilypond-internals': 'rinternals' }
--- /dev/null
+#!@PYTHON@
+
+import re
+import os
+
+def new_link_path (link, dir, r):
+ l = link.split ('/')
+ d = dir.split ('/')
+ i = 0
+ while i < len(d) and i < len(l) and l[i] == '..':
+ if r.match (d[i]):
+ del l[i]
+ else:
+ i += 1
+ return '/'.join ([x for x in l if not r.match (x)])
+
+def walk_tree (tree_roots = [],
+ process_dirs = '.*',
+ exclude_dirs = '',
+ find_files = '.*',
+ exclude_files = ''):
+ """Walk directory trees and.returns (dirs, symlinks, files, extra_files) tuple.
+
+ Arguments:
+ tree_roots=DIRLIST use DIRLIST as tree roots list
+ process_dir=PATTERN only process files in directories named PATTERN
+ exclude_dir=PATTERN don't recurse into directories named PATTERN
+ find_files=PATTERN filters files which are hardlinked
+ exclude_files=PATTERN exclude files named PATTERN
+ """
+ find_files_re = re.compile (find_files)
+ exclude_dirs_re = re.compile (exclude_dirs)
+ exclude_files_re = re.compile (exclude_files)
+ process_dirs_re = re.compile (process_dirs)
+
+ dirs_paths = []
+ symlinks_paths = []
+ files_paths = []
+
+ for d in tree_roots:
+ for current_dir, dirs, files in os.walk(d):
+ i = 0
+ while i < len(dirs):
+ if exclude_dirs_re.search (os.path.join (current_dir, dirs[i])):
+ del dirs[i]
+ else:
+ p = os.path.join (current_dir, dirs[i])
+ if os.path.islink (p):
+ symlinks_paths.append (p)
+ i += 1
+ if not process_dirs_re.search (current_dir):
+ continue
+ dirs_paths.append (current_dir)
+ for f in files:
+ if exclude_files_re.match (f):
+ continue
+ p = os.path.join (current_dir, f)
+ if os.path.islink (p):
+ symlinks_paths.append (p)
+ elif find_files_re.match (f):
+ files_paths.append (p)
+ return (dirs_paths, symlinks_paths, files_paths)
--- /dev/null
+#!@PYTHON@
+
+"""
+Postprocess HTML files:
+add footer, tweak links, add language selection menu.
+"""
+import re
+import os
+import time
+import operator
+
+import langdefs
+
+# This is to try to make the docball not too big with almost duplicate files
+# see process_links()
+non_copied_pages = ['Documentation/user/out-www/lilypond-big-page',
+ 'Documentation/user/out-www/lilypond-internals-big-page',
+ 'Documentation/user/out-www/lilypond-learning-big-page',
+ 'Documentation/user/out-www/lilypond-program-big-page',
+ 'Documentation/user/out-www/music-glossary-big-page',
+ 'out-www/examples',
+ 'Documentation/topdocs',
+ 'Documentation/bibliography',
+ 'Documentation/out-www/THANKS',
+ 'Documentation/out-www/DEDICATION',
+ 'Documentation/out-www/devel',
+ 'input/']
+
+def _doc (s):
+ return s
+
+header = r"""
+"""
+
+footer = '''
+<div class="footer">
+<p class="footer_version">
+%(footer_name_version)s
+</p>
+<p class="footer_report">
+%(footer_report_links)s
+</p>
+</div>
+'''
+footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).')
+# ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process
+footer_report_links = _doc ('Your <a href="%(suggest_Docs_url)s">suggestions for the documentation</a> are welcome, please report errors to our <a href="%(mail_address_url)s">bug list</a>.')
+
+
+mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs'
+suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding'
+
+header_tag = '<!-- header_tag -->'
+header_tag_re = re.compile (header_tag)
+
+footer_tag = '<!-- footer_tag -->'
+footer_tag_re = re.compile (footer_tag)
+
+lang_available = _doc ("Other languages: %s.")
+browser_lang = _doc ('About <A HREF="%s">automatic language selection</A>.')
+browser_language_url = "/web/about/browser-language"
+
+LANGUAGES_TEMPLATE = '''
+<p id="languages">
+ %(language_available)s
+ <br/>
+ %(browser_language)s
+</p>
+'''
+
+
+html_re = re.compile ('(.*?)(?:[.]([^/.]*))?[.]html$')
+pages_dict = {}
+
+def build_pages_dict (filelist):
+ """Build dictionary of available translations of each page"""
+ global pages_dict
+ for f in filelist:
+ m = html_re.match (f)
+ if m:
+ g = m.groups()
+ if len (g) <= 1 or g[1] == None:
+ e = ''
+ else:
+ e = g[1]
+ if not g[0] in pages_dict:
+ pages_dict[g[0]] = [e]
+ else:
+ pages_dict[g[0]].append (e)
+
+def source_links_replace (m, source_val):
+ return 'href="' + os.path.join (source_val, m.group (1)) + '"'
+
+splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|\
+Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|\
+lilypond-learning))/')
+
+snippets_ref_re = re.compile (r'href="(\.\./)?lilypond-snippets')
+user_ref_re = re.compile ('href="(?:\.\./)?lilypond\
+(-internals|-learning|-program|(?!-snippets))')
+
+docindex_link_re = re.compile (r'href="index.html"')
+
+
+## Windows does not support symlinks.
+# This function avoids creating symlinks for splitted HTML manuals
+# Get rid of symlinks in GNUmakefile.in (local-WWW-post)
+# this also fixes missing PNGs only present in translated docs
+def hack_urls (s, prefix):
+ if splitted_docs_re.match (prefix):
+ s = re.sub ('(href|src)="(../lily-.*?|.*?[.]png)"', '\\1="../\\2"', s)
+
+ # fix xrefs between documents in different directories ad hoc
+ if 'user/out-www/lilypond' in prefix:
+ s = snippets_ref_re.sub ('href="source/input/lsr/lilypond-snippets', s)
+ elif 'input/lsr' in prefix:
+ s = user_ref_re.sub ('href="source/Documentation/user/lilypond\\1', s)
+
+ # we also need to replace in the lsr, which is already processed above!
+ if 'input/' in prefix or 'Documentation/topdocs' in prefix:
+ # fix the link from the regtest, lsr and topdoc pages to the doc index
+ # (rewrite prefix to obtain the relative path of the doc index page)
+ rel_link = re.sub (r'out-www/.*$', '', prefix)
+ rel_link = re.sub (r'[^/]*/', '../', rel_link)
+ if 'input/regression' in prefix:
+ indexfile = "Documentation/devel"
+ else:
+ indexfile = "index"
+ s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s)
+
+ source_path = os.path.join (os.path.dirname (prefix), 'source')
+ if not os.path.islink (source_path):
+ return s
+ source_val = os.readlink (source_path)
+ return re.sub ('href="source/(.*?)"', lambda m: source_links_replace (m, source_val), s)
+
+body_tag_re = re.compile ('(?i)<body([^>]*)>')
+html_tag_re = re.compile ('(?i)<html>')
+doctype_re = re.compile ('(?i)<!DOCTYPE')
+doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
+css_re = re.compile ('(?i)<link rel="stylesheet" type="text/css" ([^>]*)href="[^">]*?lilypond.*\.css"([^>]*)>')
+end_head_tag_re = re.compile ('(?i)</head>')
+css_link = """ <link rel="stylesheet" type="text/css" title="Patrick McCarty's design" href="%(rel)sDocumentation/lilypond-mccarty.css">
+ <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond.css" title="Andrew Hawryluk's design">
+ <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond-blue.css" title="Kurt Kroon's blue design">
+ <!--[if lte IE 7]>
+ <link href="%(rel)sDocumentation/lilypond-ie-fixes.css" rel="stylesheet" type="text/css">
+ <![endif]-->
+"""
+
+
+def add_header (s, prefix):
+ """Add header (<body>, doctype and CSS)"""
+ if header_tag_re.search (s) == None:
+ body = '<body\\1>'
+ (s, n) = body_tag_re.subn (body + header, s, 1)
+ if not n:
+ (s, n) = html_tag_re.subn ('<html>' + header, s, 1)
+ if not n:
+ s = header + s
+
+ s = header_tag + '\n' + s
+
+ if doctype_re.search (s) == None:
+ s = doctype + s
+
+ if css_re.search (s) == None:
+ depth = (prefix.count ('/') - 1) * '../'
+ s = end_head_tag_re.sub ((css_link % {'rel': depth}) + '</head>', s)
+ return s
+
+title_tag_re = re.compile ('.*?<title>(.*?)</title>', re.DOTALL)
+AT_web_title_re = re.compile ('@WEB-TITLE@')
+
+def add_title (s):
+ # urg
+ # maybe find first node?
+ fallback_web_title = '-- --'
+ m = title_tag_re.match (s)
+ if m:
+ fallback_web_title = m.group (1)
+ s = AT_web_title_re.sub (fallback_web_title, s)
+ return s
+
+footer_insert_re = re.compile ('<!--\s*FOOTER\s*-->')
+end_body_re = re.compile ('(?i)</body>')
+end_html_re = re.compile ('(?i)</html>')
+
+def add_footer (s, footer_text):
+ """add footer"""
+ (s, n) = footer_insert_re.subn (footer_text + '\n' + '<!-- FOOTER -->', s, 1)
+ if not n:
+ (s, n) = end_body_re.subn (footer_text + '\n' + '</body>', s, 1)
+ if not n:
+ (s, n) = end_html_re.subn (footer_text + '\n' + '</html>', s, 1)
+ if not n:
+ s += footer_text + '\n'
+ return s
+
+def find_translations (prefix, lang_ext):
+ """find available translations of a page"""
+ available = []
+ missing = []
+ for l in langdefs.LANGUAGES:
+ e = l.webext
+ if lang_ext != e:
+ if e in pages_dict[prefix]:
+ available.append (l)
+ elif lang_ext == '' and l.enabled and reduce (operator.and_,
+ [not prefix.startswith (s)
+ for s in non_copied_pages]):
+ # English version of missing translated pages will be written
+ missing.append (e)
+ return available, missing
+
+online_links_re = re.compile ('''(href|src)=['"]\
+((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\
+([.]html)(#[^"']*|)['"]''')
+offline_links_re = re.compile ('href=[\'"]\
+((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
+big_page_name_re = re.compile ('''(.+?)-big-page''')
+
+def process_i18n_big_page_links (match, prefix, lang_ext):
+ big_page_name = big_page_name_re.match (match.group (1))
+ if big_page_name:
+ destination_path = os.path.normpath (os.path.join (os.path.dirname (prefix),
+ big_page_name.group (0)))
+ if not lang_ext in pages_dict[destination_path]:
+ return match.group (0)
+ return 'href="' + match.group (1) + '.' + lang_ext \
+ + match.group (2) + match.group (3) + '"'
+
+def process_links (s, prefix, lang_ext, file_name, missing, target):
+    """Rewrite the links of page contents `s' for the given target.
+
+    Returns a dict mapping output file name -> [language extension,
+    processed contents].  For offline English pages, one extra flavor is
+    generated per missing translation (see find_translations).
+    """
+    page_flavors = {}
+    if target == 'online':
+        # Strip .html, suffix for auto language selection (content
+        # negotiation). The menu must keep the full extension, so do
+        # this before adding the menu.
+        page_flavors[file_name] = \
+            [lang_ext, online_links_re.sub ('\\1="\\2\\4"', s)]
+    elif target == 'offline':
+        # in LANG doc index: don't rewrite .html suffixes
+        # as not all .LANG.html pages exist;
+        # the doc index should be translated and contain links with the right suffixes
+        if prefix == 'Documentation/out-www/index':
+            page_flavors[file_name] = [lang_ext, s]
+        elif lang_ext == '':
+            page_flavors[file_name] = [lang_ext, s]
+            # also write the English page under each missing translation's
+            # file name, with links pointing at that language's pages
+            for e in missing:
+                page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = \
+                    [e, offline_links_re.sub ('href="\\1.' + e + '\\2\\3"', s)]
+        else:
+            # For saving bandwidth and disk space, we don't duplicate big pages
+            # in English, so we must process translated big pages links differently.
+            if 'big-page' in prefix:
+                page_flavors[file_name] = \
+                    [lang_ext,
+                     offline_links_re.sub \
+                         (lambda match: process_i18n_big_page_links (match, prefix, lang_ext),
+                          s)]
+            else:
+                page_flavors[file_name] = \
+                    [lang_ext,
+                     offline_links_re.sub ('href="\\1.' + lang_ext + '\\2\\3"', s)]
+    return page_flavors
+
+def add_menu (page_flavors, prefix, available, target, translation):
+    """Append a language-selection menu (via the footer) to each flavor.
+
+    page_flavors maps output file name -> [lang_ext, contents];
+    `available' is the language list from find_translations.
+    Returns the updated page_flavors.
+    """
+    for k in page_flavors:
+        language_menu = ''
+        languages = ''
+        # pick the gettext wrapper for this flavor's language;
+        # _doc is the default (English) one
+        if page_flavors[k][0] != '':
+            t = translation[page_flavors[k][0]]
+        else:
+            t = _doc
+        for lang in available:
+            lang_file = lang.file_name (os.path.basename (prefix), '.html')
+            if language_menu != '':
+                language_menu += ', '
+            language_menu += '<a href="%s">%s</a>' % (lang_file, t (lang.name))
+        if target == 'offline':
+            browser_language = ''
+        elif target == 'online':
+            browser_language = t (browser_lang) % browser_language_url
+        if language_menu:
+            language_available = t (lang_available) % language_menu
+            # LANGUAGES_TEMPLATE picks up the locals set above via vars()
+            languages = LANGUAGES_TEMPLATE % vars ()
+        page_flavors[k][1] = add_footer (page_flavors[k][1], languages)
+    return page_flavors
+
+
+def process_html_files (package_name = '',
+                        package_version = '',
+                        target = 'offline',
+                        name_filter = lambda s: s):
+    """Add header, footer and tweak links to a number of HTML files
+
+    Arguments:
+     package_name=NAME         set package_name to NAME
+     package_version=VERSION   set package version to VERSION
+     targets=offline|online    set page processing depending on the target
+          offline is for reading HTML pages locally
+          online is for hosting the HTML pages on a website with content
+            negotiation
+     name_filter               a HTML file name filter
+    """
+    translation = langdefs.translation
+    localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
+
+    # mail_address may already be a URL (e.g. a contact web page)
+    if "http://" in mail_address:
+        mail_address_url = mail_address
+    else:
+        mail_address_url= 'mailto:' + mail_address
+
+    # odd minor version number denotes the development branch
+    versiontup = package_version.split ('.')
+    branch_str = _doc ('stable-branch')
+    if int (versiontup[1]) % 2:
+        branch_str = _doc ('development-branch')
+
+    # Initialize dictionaries for string formatting
+    subst = {}
+    subst[''] = dict ([i for i in globals ().items() if type (i[1]) is str])
+    subst[''].update (dict ([i for i in locals ().items() if type (i[1]) is str]))
+    for l in translation:
+        e = langdefs.LANGDICT[l].webext
+        if e:
+            subst[e] = {}
+            for name in subst['']:
+                subst[e][name] = translation[l] (subst[''][name])
+    # Do deeper string formatting as early as possible,
+    # so only one '%' formatting pass is needed later
+    for e in subst:
+        subst[e]['footer_name_version'] = subst[e]['footer_name_version'] % subst[e]
+        subst[e]['footer_report_links'] = subst[e]['footer_report_links'] % subst[e]
+
+    for prefix, ext_list in pages_dict.items ():
+        for lang_ext in ext_list:
+            file_name = langdefs.lang_file_name (prefix, lang_ext, '.html')
+            in_f = open (file_name)
+            s = in_f.read()
+            in_f.close()
+
+            # escape '%' so the single final formatting pass below is safe
+            s = s.replace ('%', '%%')
+            s = hack_urls (s, prefix)
+            s = add_header (s, prefix)
+
+            ### add footer
+            if footer_tag_re.search (s) == None:
+                s = add_footer (s, footer_tag + footer)
+
+            available, missing = find_translations (prefix, lang_ext)
+            page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target)
+            # Add menu after stripping: must not have autoselection for language menu.
+            page_flavors = add_menu (page_flavors, prefix, available, target, translation)
+            for k in page_flavors:
+                page_flavors[k][1] = page_flavors[k][1] % subst[page_flavors[k][0]]
+                out_f = open (name_filter (k), 'w')
+                out_f.write (page_flavors[k][1])
+                out_f.close()
+        # if the page is translated, a .en.html symlink is necessary for content negotiation
+        # NOTE(review): os.symlink fails if the link already exists --
+        # confirm the output directory is always fresh when target=='online'.
+        if target == 'online' and ext_list != ['']:
+            os.symlink (os.path.basename (prefix) + '.html', name_filter (prefix + '.en.html'))
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'install', 'src_glob')
-sources = src_glob ('*.scm')
-install (sources, env['sharedir_package_version'] + '/scm')
(if (and (not embed)
(equal? 'regular (stat:type (stat full-name)))
(equal? name (ly:ttf-ps-name full-name)))
- (set! embed (font-file-as-ps-string name full-name)))
+ (set! embed (font-file-as-ps-string name full-name 0)))
(if (or (equal? "." f)
(equal? ".." f))
#t
depth = ..
+SUBDIRS=aux build
+
SEXECUTABLES=convert-ly lilypond-book abc2ly etf2ly midi2ly lilypond-invoke-editor musicxml2ly lilysong lilymidi
STEPMAKE_TEMPLATES=script help2man po
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'install', 'src_glob')
-sources = src_glob ('*.py')
-scripts = map (env.AT_COPY, sources)
-
-install (scripts, env['bindir'])
-
-po = env.Command ('lilypond.po', sources, env['pocommand'])
-env.Alias ('po-update', po)
--- /dev/null
+depth=../..
+
+# Ship the maintenance scripts in this directory with the distribution;
+# they are run in place, nothing is built from them.
+EXTRA_DIST_FILES = $(call src-wildcard,*.sh) $(call src-wildcard,*.py)
+EXTRA_DIST_FILES += pfx2ttf.fontforge
+
+include $(depth)/make/stepmake.make
+
+# empty default target: nothing to build here
+default:
--- /dev/null
+#!/bin/sh
+
+# Build LilyPond with gcov instrumentation, run the test suite, then
+# collect per-file coverage results under out/coverage-results.
+
+# Portable sh: test(1) string equality uses `=' -- `==' is a bashism
+# and fails under strict POSIX shells (the rest of this script already
+# uses `=').
+if test "$1" = "--fresh"; then
+  fresh=yes
+fi
+
+# force a fresh configure when the cov config has never been created
+if test ! -f config-cov.make; then
+  fresh=yes
+fi
+
+if test "$fresh" = "yes";
+then
+  # configure a separate `cov' build and inject gcov flags into it
+  ./configure --enable-config=cov --disable-optimising \
+   &&   make conf=cov -j2 clean \
+   &&   perl -i~ -pe 's/-pipe /-fprofile-arcs -ftest-coverage -pipe /g' config-cov.make \
+   &&   perl -i~ -pe 's/ -ldl / -lgcov -ldl /g' config-cov.make
+else
+  # reuse the existing build, but drop stale execution counts
+  find -name '*.gcda' -exec rm '{}' ';'
+fi
+
+mkdir -p scripts/out-cov/
+touch scripts/out-cov/midi2ly scripts/out-cov/midi2ly.1
+make conf=cov -j2 && \
+  make conf=cov test-clean OUT_TEST=testcov LILYPOND_JOBS= && \
+  make conf=cov test OUT_TEST=testcov LILYPOND_JOBS='-dtrace-scheme-coverage '
+
+if test "$?" != "0"; then
+  tail -100 out-cov/test-run.log
+  exit 1
+fi
+
+depth=../..
+resultdir=out/coverage-results
+
+rm -rf $resultdir
+mkdir $resultdir
+cd $resultdir
+
+# hard-link sources next to the coverage data so gcov can find them
+ln $depth/lily/* .
+ln $depth/scm/*.scm .
+mv $depth/input/regression/out-testcov/*.scm.cov .
+ln $depth/ly/*.ly .
+ln $depth/lily/out-cov/*[ch] .
+mkdir include
+ln $depth/lily/include/* include/
+ln $depth/flower/include/* include/
+for a in *[cl] *.yy
+do
+  gcov -o $depth/lily/out-cov/ -p $a > $a.gcov-summary
+done
+
+$depth/scripts/aux/coverage.py --uncovered *.cc > uncovered.txt
+$depth/scripts/aux/coverage.py --hotspots *.cc > hotspots.txt
+$depth/scripts/aux/coverage.py --summary *.cc > summary.txt
+$depth/scripts/aux/coverage.py --uncovered *.scm > uncovered-scheme.txt
+
+head -20 summary.txt
+
+cat <<EOF
+results in
+
+  out/coverage-results/summary.txt
+  out/coverage-results/uncovered.txt
+  out/coverage-results/uncovered-scheme.txt
+  out/coverage-results/hotspots.txt
+
+EOF
--- /dev/null
+#!/bin/sh
+
+# Build an optimised, gprof-instrumented LilyPond and profile it on a
+# set of representative scores; results end up in out/profile-results.
+
+# `=' (not the bashism `==') is the portable test(1) equality operator.
+if test "$1" = "--fresh"; then
+  fresh=yes
+fi
+
+if test ! -f config-prof.make; then
+  fresh=yes
+fi
+
+if test "$fresh" = "yes";
+then
+  # configure a separate `prof' build and inject -pg into its flags
+  ./configure --enable-config=prof --enable-optimising \
+   &&   perl -i~ -pe 's/-pipe /-pg -pipe /g' config-prof.make \
+   &&   perl -i~ -pe 's/ -ldl / -pg -ldl /g' config-prof.make
+fi
+
+make conf=prof -j2
+
+if test "$?" != "0"; then
+  exit 2
+fi
+
+depth=../..
+resultdir=out/profile-results
+
+rm -rf $resultdir
+mkdir $resultdir
+cd $resultdir
+
+
+cat > long-score.ly << EOF
+\version "2.10.0"
+foo = \new Staff \new Voice \repeat unfold 50 \relative { c4 d8[ d16( e]~ e16[ e e) f] g8 }
+\score {
+  \new ChoirStaff <<
+    \foo \foo \foo \foo
+    \foo \foo \foo \foo
+
+  >>
+  \midi {}
+  \layout {}
+}
+EOF
+
+# -f: the file does not exist on a fresh run, plain rm would complain
+rm -f gmon.sum
+
+exe=$depth/out-prof/bin/lilypond
+
+## todo: figure out representative sample.
+files="wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 mozart-hrn-3 mozart-hrn-3 long-score"
+
+
+
+$exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
+    -I $depth/input/mutopia/W.A.Mozart/ \
+    $files
+
+
+for a in *.profile; do
+  echo $a
+  cat $a
+done
+
+echo 'running gprof'
+gprof $exe > profile
+
+exit 0
+
+
+## Everything below is intentionally unreachable (see `exit 0' above);
+## kept as a recipe for accumulating profiles, as gprof -s takes forever.
+## Fixed the missing command substitution around `seq 1 3'.
+for a in `seq 1 3`; do
+  for f in $files ; do
+    $exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
+        -I $depth/input/mutopia/W.A.Mozart/ \
+        $f
+
+    echo 'running gprof'
+    if test -f gmon.sum ; then
+      gprof -s $exe gmon.out gmon.sum
+    else
+      mv gmon.out gmon.sum
+    fi
+  done
+done
+
+gprof $exe gmon.sum > profile
--- /dev/null
+#!/usr/bin/env python
+
+"""
+check_texi_refs.py
+Interactive Texinfo cross-references checking and fixing tool
+
+"""
+
+
+import sys
+import re
+import os
+import optparse
+import imp
+
+outdir = 'out-www'
+
+log = sys.stderr
+stdout = sys.stdout
+
+file_not_found = 'file not found in include path'
+
+warn_not_fixed = '*** Warning: this broken x-ref has not been fixed!\n'
+
+opt_parser = optparse.OptionParser (usage='check_texi_refs.py [OPTION]... FILE',
+ description='''Check and fix \
+cross-references in a collection of Texinfo
+documents heavily cross-referenced each other.
+''')
+
+opt_parser.add_option ('-a', '--auto-fix',
+ help="Automatically fix cross-references whenever \
+it is possible",
+ action='store_true',
+ dest='auto_fix',
+ default=False)
+
+opt_parser.add_option ('-b', '--batch',
+ help="Do not run interactively",
+ action='store_false',
+ dest='interactive',
+ default=True)
+
+opt_parser.add_option ('-c', '--check-comments',
+ help="Also check commented out x-refs",
+ action='store_true',
+ dest='check_comments',
+ default=False)
+
+opt_parser.add_option ('-p', '--check-punctuation',
+ help="Check punctuation after x-refs",
+ action='store_true',
+ dest='check_punctuation',
+ default=False)
+
+opt_parser.add_option ("-I", '--include', help="add DIR to include path",
+ metavar="DIR",
+ action='append', dest='include_path',
+ default=[os.path.abspath (os.getcwd ())])
+
+(options, files) = opt_parser.parse_args ()
+
+class InteractionError (Exception):
+ pass
+
+
+manuals_defs = imp.load_source ('manuals_defs', files[0])
+manuals = {}
+
+def find_file (name, prior_directory='.'):
+    """Locate `name', trying prior_directory first, then the include
+    path, each with an `outdir' subdirectory fallback.
+
+    Raises EnvironmentError with strerror == file_not_found when the
+    file cannot be found anywhere.
+    """
+    p = os.path.join (prior_directory, name)
+    out_p = os.path.join (prior_directory, outdir, name)
+    if os.path.isfile (p):
+        return p
+    elif os.path.isfile (out_p):
+        return out_p
+
+    # looking for file in include_path
+    for d in options.include_path:
+        p = os.path.join (d, name)
+        if os.path.isfile (p):
+            return p
+
+    # file not found in include_path: looking in `outdir' subdirs
+    for d in options.include_path:
+        p = os.path.join (d, outdir, name)
+        if os.path.isfile (p):
+            return p
+
+    raise EnvironmentError (1, file_not_found, name)
+
+
+exit_code = 0
+
+def set_exit_code (n):
+ global exit_code
+ exit_code = max (exit_code, n)
+
+
+if options.interactive:
+ try:
+ import readline
+ except:
+ pass
+
+ def yes_prompt (question, default=False, retries=3):
+ d = {True: 'y', False: 'n'}.get (default, False)
+ while retries:
+ a = raw_input ('%s [default: %s]' % (question, d) + '\n')
+ if a.lower ().startswith ('y'):
+ return True
+ if a.lower ().startswith ('n'):
+ return False
+ if a == '' or retries < 0:
+ return default
+ stdout.write ("Please answer yes or no.\n")
+ retries -= 1
+
+ def search_prompt ():
+ """Prompt user for a substring to look for in node names.
+
+If user input is empty or matches no node name, return None,
+otherwise return a list of (manual, node name, file) tuples.
+
+"""
+ substring = raw_input ("Enter a substring to search in node names \
+(press Enter to skip this x-ref):\n")
+ if not substring:
+ return None
+ substring = substring.lower ()
+ matches = []
+ for k in manuals:
+ matches += [(k, node, manuals[k]['nodes'][node][0])
+ for node in manuals[k]['nodes']
+ if substring in node.lower ()]
+ return matches
+
+else:
+ def yes_prompt (question, default=False, retries=3):
+ return default
+
+ def search_prompt ():
+ return None
+
+
+ref_re = re.compile \
+ ('@(ref|ruser|rlearning|rprogram|rglos)(?:\\{(?P<ref>[^,\\\\\\}]+?)|\
+named\\{(?P<refname>[^,\\\\]+?),(?P<display>[^,\\\\\\}]+?))\\}(?P<last>.)',
+ re.DOTALL)
+node_include_re = re.compile (r'(?m)^@(node|include)\s+(.+?)$')
+
+whitespace_re = re.compile (r'\s+')
+line_start_re = re.compile ('(?m)^')
+
+def which_line (index, newline_indices):
+    """Calculate line number of a given string index
+
+    Return line number of string index index, where
+    newline_indices is an ordered iterable of all newline indices.
+    """
+    # binary search for the greatest beginning-of-line index <= index
+    # NOTE(review): assumes len(newline_indices) >= 2 -- with fewer
+    # entries the loop condition never becomes false; confirm callers.
+    inf = 0
+    sup = len (newline_indices) - 1
+    n = len (newline_indices)  # unused, kept for fidelity
+    while inf + 1 != sup:
+        m = (inf + sup) / 2  # Python 2 integer division
+        if index >= newline_indices [m]:
+            inf = m
+        else:
+            sup = m
+    return inf + 1
+
+
+comments_re = re.compile ('(?<!@)(@c(?:omment)? \
+.*?\\n|^@ignore\\n.*?\\n@end ignore\\n)', re.M | re.S)
+
+def calc_comments_boundaries (texinfo_doc):
+ return [(m.start (), m.end ()) for m in comments_re.finditer (texinfo_doc)]
+
+
+def is_commented_out (start, end, comments_boundaries):
+ for k in range (len (comments_boundaries)):
+ if (start > comments_boundaries[k][0]
+ and end <= comments_boundaries[k][1]):
+ return True
+ elif end <= comments_boundaries[k][0]:
+ return False
+ return False
+
+
+def read_file (f, d):
+ s = open (f).read ()
+ base = os.path.basename (f)
+ dir = os.path.dirname (f)
+
+ d['contents'][f] = s
+
+ d['newline_indices'][f] = [m.end () for m in line_start_re.finditer (s)]
+ if options.check_comments:
+ d['comments_boundaries'][f] = []
+ else:
+ d['comments_boundaries'][f] = calc_comments_boundaries (s)
+
+ for m in node_include_re.finditer (s):
+ if m.group (1) == 'node':
+ line = which_line (m.start (), d['newline_indices'][f])
+ d['nodes'][m.group (2)] = (f, line)
+
+ elif m.group (1) == 'include':
+ try:
+ p = find_file (m.group (2), dir)
+ except EnvironmentError, (errno, strerror):
+ if strerror == file_not_found:
+ continue
+ else:
+ raise
+ read_file (p, d)
+
+
+def read_manual (name):
+ """Look for all node names and cross-references in a Texinfo document
+
+Return a (manual, dictionary) tuple where manual is the cross-reference
+macro name defined by references_dict[name], and dictionary
+has the following keys:
+
+ 'nodes' is a dictionary of `node name':(file name, line number),
+
+ 'contents' is a dictionary of file:`full file contents',
+
+ 'newline_indices' is a dictionary of
+file:[list of beginning-of-line string indices],
+
+ 'comments_boundaries' is a list of (start, end) tuples,
+which contain string indices of start and end of each comment.
+
+Included files that can be found in the include path are processed too.
+
+"""
+ d = {}
+ d['nodes'] = {}
+ d['contents'] = {}
+ d['newline_indices'] = {}
+ d['comments_boundaries'] = {}
+ manual = manuals_defs.references_dict.get (name, '')
+ try:
+ f = find_file (name + '.tely')
+ except EnvironmentError, (errno, strerror):
+ if not strerror == file_not_found:
+ raise
+ else:
+ try:
+ f = find_file (name + '.texi')
+ except EnvironmentError, (errno, strerror):
+ if strerror == file_not_found:
+ sys.stderr.write (name + '.{texi,tely}: ' +
+ file_not_found + '\n')
+ return (manual, d)
+ else:
+ raise
+
+ log.write ("Processing manual %s (%s)\n" % (f, manual))
+ read_file (f, d)
+ return (manual, d)
+
+
+log.write ("Reading files...\n")
+
+manuals = dict ([read_manual (name)
+ for name in manuals_defs.references_dict.keys ()])
+
+ref_fixes = set ()
+bad_refs_count = 0
+fixes_count = 0
+
+def add_fix (old_type, old_ref, new_type, new_ref):
+ ref_fixes.add ((old_type, old_ref, new_type, new_ref))
+
+
+def lookup_fix (r):
+ found = []
+ for (old_type, old_ref, new_type, new_ref) in ref_fixes:
+ if r == old_ref:
+ found.append ((new_type, new_ref))
+ return found
+
+
+def preserve_linebreak (text, linebroken):
+    """Put a line break back into `text' if the original x-ref had one.
+
+    Returns (text, trailing): trailing is a '\n' to append after the
+    macro when the break could not be reinserted inside `text' itself.
+    """
+    if linebroken:
+        if ' ' in text:
+            # reinsert the break at the first space
+            text = text.replace (' ', '\n', 1)
+            n = ''
+        else:
+            n = '\n'
+    else:
+        n = ''
+    return (text, n)
+
+
+def choose_in_numbered_list (message, string_list, sep=' ', retries=3):
+    """Ask the user to pick an entry from string_list.
+
+    The user may type either an index number or a substring that matches
+    exactly one entry; an empty answer returns ''.  Raises
+    InteractionError after `retries' invalid answers.
+    """
+    # deduplicate and drop empty entries
+    S = set (string_list)
+    S.discard ('')
+    string_list = list (S)
+    numbered_list = sep.join ([str (j + 1) + '. ' + string_list[j]
+                               for j in range (len (string_list))]) + '\n'
+    t = retries
+    while t > 0:
+        value = ''
+        stdout.write (message +
+                      "(press Enter to discard and start a new search)\n")
+        input = raw_input (numbered_list)  # note: shadows the builtin `input'
+        if not input:
+            return ''
+        try:
+            value = string_list[int (input) - 1]
+        except IndexError:
+            stdout.write ("Error: index number out of range\n")
+        except ValueError:
+            # not a number: treat the answer as a substring match
+            matches = [input in v for v in string_list]
+            n = matches.count (True)
+            if n == 0:
+                stdout.write ("Error: input matches no item in the list\n")
+            elif n > 1:
+                stdout.write ("Error: ambiguous input (matches several items \
+in the list)\n")
+            else:
+                value = string_list[matches.index (True)]
+        if value:
+            return value
+        t -= 1
+    raise InteractionError ("%d retries limit exceeded" % retries)
+
+refs_count = 0
+
+def check_ref (manual, file, m):
+    """Check one x-ref match `m' found in `file' of `manual'.
+
+    Used as a re.sub callback: returns the (possibly fixed) replacement
+    text for the whole @ref/@ruser/... macro.  Updates the global
+    counters refs_count, bad_refs_count and fixes_count.
+    """
+    global fixes_count, bad_refs_count, refs_count
+    refs_count += 1
+    bad_ref = False
+    fixed = True
+    type = m.group (1)
+    original_name = m.group ('ref') or m.group ('refname')
+    name = whitespace_re.sub (' ', original_name). strip ()
+    newline_indices = manuals[manual]['newline_indices'][file]
+    line = which_line (m.start (), newline_indices)
+    linebroken = '\n' in original_name
+    original_display_name = m.group ('display')
+    next_char = m.group ('last')
+    if original_display_name: # the xref has an explicit display name
+        display_linebroken = '\n' in original_display_name
+        display_name = whitespace_re.sub (' ', original_display_name). strip ()
+    commented_out = is_commented_out \
+        (m.start (), m.end (), manuals[manual]['comments_boundaries'][file])
+    # fixing generated files (under `outdir') is pointless
+    useful_fix = not outdir in file
+
+    # check punctuation after x-ref
+    if options.check_punctuation and not next_char in '.,;:!?':
+        stdout.write ("Warning: %s: %d: `%s': x-ref \
+not followed by punctuation\n" % (file, line, name))
+
+    # validate xref
+    explicit_type = type
+    new_name = name
+
+    if type != 'ref' and type == manual and not commented_out:
+        if useful_fix:
+            fixed = False
+            bad_ref = True
+            stdout.write ("\n%s: %d: `%s': external %s x-ref should be internal\n"
+                          % (file, line, name, type))
+            if options.auto_fix or yes_prompt ("Fix this?"):
+                type = 'ref'
+
+    if type == 'ref':
+        explicit_type = manual
+
+    if not name in manuals[explicit_type]['nodes'] and not commented_out:
+        bad_ref = True
+        fixed = False
+        stdout.write ('\n')
+        # '\x1b[...m' are ANSI color codes; the original used '\e', which
+        # Python does not interpret (it printed a literal backslash-e).
+        if type == 'ref':
+            stdout.write ("\x1b[1;31m%s: %d: `%s': wrong internal x-ref\x1b[0m\n"
+                          % (file, line, name))
+        else:
+            stdout.write ("\x1b[1;31m%s: %d: `%s': wrong external `%s' x-ref\x1b[0m\n"
+                          % (file, line, name, type))
+        # print context
+        stdout.write ('--\n' + manuals[manual]['contents'][file]
+                      [newline_indices[max (0, line - 2)]:
+                       newline_indices[min (line + 3,
+                                            len (newline_indices) - 1)]] +
+                      '--\n')
+
+        # try to find the reference in other manuals
+        found = []
+        for k in [k for k in manuals if k != explicit_type]:
+            if name in manuals[k]['nodes']:
+                if k == manual:
+                    found = ['ref']
+                    stdout.write ("\x1b[1;32m found as internal x-ref\x1b[0m\n")
+                    break
+                else:
+                    found.append (k)
+                    stdout.write ("\x1b[1;32m found as `%s' x-ref\x1b[0m\n" % k)
+
+        if (len (found) == 1
+            and (options.auto_fix or yes_prompt ("Fix this x-ref?"))):
+            add_fix (type, name, found[0], name)
+            type = found[0]
+            fixed = True
+
+        elif len (found) > 1 and useful_fix:
+            if options.interactive or options.auto_fix:
+                stdout.write ("* Several manuals contain this node name, \
+cannot determine manual automatically.\n")
+            if options.interactive:
+                t = choose_in_numbered_list ("Choose manual for this x-ref by \
+index number or beginning of name:\n", found)
+                if t:
+                    add_fix (type, name, t, name)
+                    type = t
+                    fixed = True
+
+        if not fixed:
+            # try to find a fix already made
+            found = lookup_fix (name)
+
+            if len (found) == 1:
+                stdout.write ("Found one previous fix: %s `%s'\n" % found[0])
+                if options.auto_fix or yes_prompt ("Apply this fix?"):
+                    type, new_name = found[0]
+                    fixed = True
+
+            elif len (found) > 1:
+                if options.interactive or options.auto_fix:
+                    stdout.write ("* Several previous fixes match \
+this node name, cannot fix automatically.\n")
+                if options.interactive:
+                    # fixed NameError: the variable was assigned as
+                    # `concatened' but read back as `concatenated'
+                    concatenated = choose_in_numbered_list ("Choose new manual \
+and x-ref by index number or beginning of name:\n", [''.join ([i[0], ' ', i[1]])
+                                                     for i in found],
+                                                    sep='\n')
+                    if concatenated:
+                        type, new_name = concatenated.split (' ', 1)
+                        fixed = True
+
+        if not fixed:
+            # all previous automatic fixing attempts failed,
+            # ask user for substring to look in node names
+            while True:
+                node_list = search_prompt ()
+                if node_list == None:
+                    if options.interactive:
+                        stdout.write (warn_not_fixed)
+                    break
+                elif not node_list:
+                    stdout.write ("No matched node names.\n")
+                else:
+                    concatenated = choose_in_numbered_list ("Choose \
+node name and manual for this x-ref by index number or beginning of name:\n", \
+                        [' '.join ([i[0], i[1], '(in %s)' % i[2]])
+                         for i in node_list],
+                        sep='\n')
+                    if concatenated:
+                        t, z = concatenated.split (' ', 1)
+                        new_name = z.split (' (in ', 1)[0]
+                        add_fix (type, name, t, new_name)
+                        type = t
+                        fixed = True
+                        break
+
+    # normalize a now-internal x-ref back to plain @ref
+    if fixed and type == manual:
+        type = 'ref'
+    bad_refs_count += int (bad_ref)
+    if bad_ref and not useful_fix:
+        stdout.write ("*** Warning: this file is automatically generated, \
+please fix the code source instead of generated documentation.\n")
+
+    # compute returned string
+    if new_name == name:
+        if bad_ref and (options.interactive or options.auto_fix):
+            # only the type of the ref was fixed
+            fixes_count += int (fixed)
+        if original_display_name:
+            return ('@%snamed{%s,%s}' % (type, original_name, original_display_name)) + next_char
+        else:
+            return ('@%s{%s}' % (type, original_name)) + next_char
+    else:
+        fixes_count += int (fixed)
+        (ref, n) = preserve_linebreak (new_name, linebroken)
+        if original_display_name:
+            if bad_ref:
+                # fixed: the original format string had no `%' argument
+                stdout.write ("Current display name is `%s'\n" % display_name)
+                display_name = raw_input \
+                    ("Enter a new display name or press enter to keep the existing name:\n") \
+                    or display_name
+                (display_name, n) = preserve_linebreak (display_name, display_linebroken)
+            else:
+                display_name = original_display_name
+            return ('@%snamed{%s,%s}' % (type, ref, display_name)) + \
+                next_char + n
+        else:
+            return ('@%s{%s}' % (type, ref)) + next_char + n
+
+
+log.write ("Checking cross-references...\n")
+
+# Rewrite each manual file in place when any of its x-refs was fixed.
+try:
+    for key in manuals:
+        for file in manuals[key]['contents']:
+            s = ref_re.sub (lambda m: check_ref (key, file, m),
+                            manuals[key]['contents'][file])
+            if s != manuals[key]['contents'][file]:
+                open (file, 'w').write (s)
+except KeyboardInterrupt:
+    log.write ("Operation interrupted, exiting.\n")
+    sys.exit (2)
+except InteractionError, instance:
+    log.write ("Operation refused by user: %s\nExiting.\n" % instance)
+    sys.exit (3)
+
+# '\x1b[1;36m' is the ANSI cyan code; the original '\e' is not a Python
+# escape and printed a literal backslash-e.
+log.write ("\x1b[1;36mDone: %d x-refs found, %d bad x-refs found, fixed %d.\x1b[0m\n" %
+           (refs_count, bad_refs_count, fixes_count))
--- /dev/null
+#!/usr/bin/env python
+
+import __main__
+import optparse
+import os
+import sys
+
+import langdefs
+import buildlib
+
+verbose = 0
+use_colors = False
+lang = 'C'
+C = lang
+
+def dir_lang (file, lang, lang_dir_index):
+    """Return `file' with its path component at lang_dir_index replaced
+    by `lang' (an empty string effectively drops that component)."""
+    path_components = file.split ('/')
+    path_components[lang_dir_index] = lang
+    return os.path.join (*path_components)
+
+def do_file (file_name, lang_codes, buildlib):
+ if verbose:
+ sys.stderr.write ('%s...\n' % file_name)
+ split_file_name = file_name.split ('/')
+ d1, d2 = split_file_name[0:2]
+ if d1 in lang_codes:
+ check_lang = d1
+ lang_dir_index = 0
+ elif d2 in lang_codes:
+ check_lang = d2
+ lang_dir_index = 1
+ else:
+ check_lang = lang
+ if check_lang == C:
+ raise Exception ('cannot determine language for ' + file_name)
+
+ original = dir_lang (file_name, '', lang_dir_index)
+ translated_contents = open (file_name).read ()
+ (diff_string, error) \
+ = buildlib.check_translated_doc (original,
+ file_name,
+ translated_contents,
+ color=use_colors and not update_mode)
+
+ if error:
+ sys.stderr.write ('warning: %s: %s' % (file_name, error))
+
+ if update_mode:
+ if error or len (diff_string) >= os.path.getsize (original):
+ buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + original)
+ elif diff_string:
+ diff_file = original + '.diff'
+ f = open (diff_file, 'w')
+ f.write (diff_string)
+ f.close ()
+ buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + diff_file)
+ os.remove (diff_file)
+ else:
+ sys.stdout.write (diff_string)
+
+def usage ():
+ sys.stdout.write (r'''
+Usage:
+check-translation [--language=LANG] [--verbose] [--update] FILE...
+
+This script is licensed under the GNU GPL.
+''')
+
+def do_options ():
+ global lang, verbose, update_mode, use_colors
+
+ p = optparse.OptionParser (usage="check-translation [--language=LANG] [--verbose] FILE...",
+ description="This script is licensed under the GNU GPL.")
+ p.add_option ("--language",
+ action='store',
+ default='site',
+ dest="language")
+ p.add_option ("--no-color",
+ action='store_false',
+ default=True,
+ dest="color",
+ help="do not print ANSI-cooured output")
+ p.add_option ("--verbose",
+ action='store_true',
+ default=False,
+ dest="verbose",
+ help="print details, including executed shell commands")
+ p.add_option ('-u', "--update",
+ action='store_true',
+ default=False,
+ dest='update_mode',
+ help='call $EDITOR to update the translation')
+
+ (options, files) = p.parse_args ()
+ verbose = options.verbose
+ lang = options.language
+ use_colors = options.color
+ update_mode = options.update_mode
+
+ return files
+
+def main ():
+ global update_mode, text_editor
+
+ files = do_options ()
+ if 'EDITOR' in os.environ:
+ text_editor = os.environ['EDITOR']
+ else:
+ update_mode = False
+
+ buildlib.verbose = verbose
+
+ for i in files:
+ do_file (i, langdefs.LANGDICT.keys (), buildlib)
+
+if __name__ == '__main__':
+ main ()
--- /dev/null
+#!/usr/bin/env python
+
+import os
+import glob
+import re
+import sys
+import optparse
+
+#File 'accidental-engraver.cc'
+#Lines executed:87.70% of 252
+
+def summary (args):
+ results = []
+ for f in args:
+ str = open (f).read ()
+ m = re.search ("File '([^']+.cc)'\s*Lines executed:([0-9.]+)% of ([0-9]+)", str)
+
+ if m and '/usr/lib' in m.group (1):
+ continue
+
+ if m:
+ cov = float (m.group (2))
+ lines = int (m.group (3))
+ pain = lines * (100.0 - cov)
+ file = m.group (1)
+ tup = (pain, locals ().copy())
+
+ results.append(tup)
+
+ results.sort ()
+ results.reverse()
+
+ print 'files sorted by number of untested lines (decreasing)'
+ print
+ print '%5s (%6s): %s' % ('cov %', 'lines', 'file')
+ print '----------------------------------------------'
+
+ for (pain, d) in results:
+ print '%(cov)5.2f (%(lines)6d): %(file)s' % d
+
+class Chunk:
+    """A contiguous run of gcov report lines sharing one execution count.
+
+    range is a half-open (start, end) pair of indices into all_lines;
+    all_lines is the full list of (count, line_number, text) tuples for
+    the file; coverage_count is the shared execution count.
+    """
+    def __init__ (self, range, coverage_count, all_lines, file):
+        assert coverage_count >= 0
+        assert type (range) == type (())
+
+        self.coverage_count = coverage_count
+        self.range = range
+        self.all_lines = all_lines
+        self.file = file
+
+    def length (self):
+        return self.range[1] - self.range[0]
+
+    def text (self):
+        # concatenated source text of the chunk's lines
+        return ''.join ([l[2] for l in self.lines()])
+
+    def lines (self):
+        return self.all_lines[self.range[0]:
+                              self.range[1]]
+
+    def widen (self):
+        # Extend the chunk by one line of context on each side.
+        # Fixed: the original used min() (pinning the start at a negative
+        # value) and rebuilt the end from range[0] instead of range[1].
+        self.range = (max (self.range[0] - 1, 0),
+                      self.range[1] + 1)
+
+    def write (self):
+        print 'chunk in', self.file
+        for (c, n, l) in self.lines ():
+            cov = '%d' % c
+            if c == 0:
+                cov = '#######'   # gcov's marker for never-executed lines
+            elif c < 0:
+                cov = ''          # non-executable line
+            sys.stdout.write ('%8s:%8d:%s' % (cov, n, l))
+
+    def uncovered_score (self):
+        # default metric: just the chunk's size in lines
+        return self.length ()
+
+class SchemeChunk (Chunk):
+    """Chunk specialization for Scheme (.scm.cov) coverage files."""
+    def uncovered_score (self):
+        # Plain data (define ...) bindings and module imports only run at
+        # load time; they are not interesting as "uncovered code".
+        text = self.text ()
+        if (text.startswith ('(define ')
+            and not text.startswith ('(define (')):
+            return 0
+
+        if text.startswith ('(use-modules '):
+            return 0
+
+        if (text.startswith ('(define-public ')
+            and not text.startswith ('(define-public (')):
+            return 0
+
+        # count only lines whose execution count is exactly zero
+        return len ([l for (c,n,l) in self.lines() if (c == 0)])
+
+def read_gcov (f):
+    """Parse gcov output file `f' into a list of (count, line_no, text).
+
+    count is -1 for non-executable lines ('-' in the report) and 0 for
+    executable-but-never-executed lines ('#####').
+    """
+    ls = []
+
+    in_lines = [l for l in open (f).readlines ()]
+    # column widths of the `count:line:' prefix, taken from the first line
+    (count_len, line_num_len) = tuple (map (len, in_lines[0].split (':')[:2]))
+
+    for l in in_lines:
+        c = l[:count_len].strip ()
+        l = l[count_len+1:]
+        n = int (l[:line_num_len].strip ())
+
+        if n == 0:
+            # line number 0 carries gcov metadata (Source:, Runs:, ...)
+            continue
+
+        if '#' in c:
+            c = 0
+        elif c == '-':
+            c = -1
+        else:
+            c = int (c)
+
+        l = l[line_num_len+1:]
+
+        ls.append ((c,n,l))
+
+    return ls
+
+def get_c_chunks (ls, file):
+ chunks = []
+ chunk = []
+
+ last_c = -1
+ for (c, n, l) in ls:
+ if not (c == last_c or c < 0 and l != '}\n'):
+ if chunk and last_c >= 0:
+ nums = [n-1 for (n, l) in chunk]
+ chunks.append (Chunk ((min (nums), max (nums)+1),
+ last_c, ls, file))
+ chunk = []
+
+ chunk.append ((n,l))
+ if c >= 0:
+ last_c = c
+
+ return chunks
+
+def get_scm_chunks (ls, file):
+ chunks = []
+ chunk = []
+
+ def new_chunk ():
+ if chunk:
+ nums = [n-1 for (n, l) in chunk]
+ chunks.append (SchemeChunk ((min (nums), max (nums)+1),
+ max (last_c, 0), ls, file))
+ chunk[:] = []
+
+ last_c = -1
+ for (cov_count, line_number, line) in ls:
+ if line.startswith ('('):
+ new_chunk ()
+ last_c = -1
+
+ chunk.append ((line_number, line))
+ if cov_count >= 0:
+ last_c = cov_count
+
+ return chunks
+
+def widen_chunk (ch, ls):
+    """Return (line_number, text) pairs of chunk `ch' widened by one
+    line of context on each side, taken from the full line list `ls'."""
+    # Fixed: `a' and `b' were used without being initialized in the
+    # original (NameError on first call); they are the chunk's range.
+    (a, b) = ch.range
+    a = max (a - 1, 0)  # clamp so the slice does not wrap around the end
+    b += 1
+
+    return [(n, l) for (c, n, l) in ls[a:b]]
+
+
+def extract_chunks (file):
+    """Parse a coverage report and split it into Chunk objects;
+    Scheme (.scm.cov) reports yield SchemeChunk instances.
+    Returns [] when the file cannot be read."""
+    try:
+        ls = read_gcov (file)
+    except IOError, s :
+        print s
+        return []
+
+    cs = []
+    if 'scm' in file:
+        cs = get_scm_chunks (ls, file)
+    else:
+        cs = get_c_chunks (ls, file)
+    return cs
+
+
+def filter_uncovered (chunks):
+    """Keep only chunks that were never executed and are interesting,
+    i.e. not mere diagnostics or GC boilerplate."""
+    def interesting (c):
+        if c.coverage_count > 0:
+            return False
+
+        # error paths and mark hooks are expected to be rarely executed
+        t = c.text()
+        for stat in ('warning', 'error', 'print', 'scm_gc_mark'):
+            if stat in t:
+                return False
+        return True
+
+    return [c for c in chunks if interesting (c)]
+
+
+def main ():
+ p = optparse.OptionParser (usage="usage coverage.py [options] files",
+ description="")
+ p.add_option ("--summary",
+ action='store_true',
+ default=False,
+ dest="summary")
+
+ p.add_option ("--hotspots",
+ default=False,
+ action='store_true',
+ dest="hotspots")
+
+ p.add_option ("--uncovered",
+ default=False,
+ action='store_true',
+ dest="uncovered")
+
+
+ (options, args) = p.parse_args ()
+
+
+ if options.summary:
+ summary (['%s.gcov-summary' % s for s in args])
+
+ if options.uncovered or options.hotspots:
+ chunks = []
+ for a in args:
+ name = a
+ if name.endswith ('scm'):
+ name += '.cov'
+ else:
+ name += '.gcov'
+
+ chunks += extract_chunks (name)
+
+ if options.uncovered:
+ chunks = filter_uncovered (chunks)
+ chunks = [(c.uncovered_score (), c) for c in chunks if c.uncovered_score() > 0]
+ elif options.hotspots:
+ chunks = [((c.coverage_count, -c.length()), c) for c in chunks]
+
+
+ chunks.sort ()
+ chunks.reverse ()
+ for (score, c) in chunks:
+ c.write ()
+
+
+
+if __name__ == '__main__':
+ main ()
--- /dev/null
+#!/usr/bin/env python
+import sys
+import re
+import os
+
+
+full_paths = {}
+incs = {}
+inc_re = re.compile ('^#include "([^"]+)"')
+def parse_file (fn):
+    """Record the local #include directives of file `fn'.
+
+    Fills the module-level dicts: full_paths maps basename -> path,
+    incs maps basename -> list of (line number, included name).
+    """
+    lst = []
+
+    lc = 0
+    for l in open (fn).readlines():
+        lc += 1
+        m = inc_re.search (l)
+        if m:
+            lst.append ((lc, m.group (1)))
+
+    base = os.path.split (fn)[1]
+    full_paths[base] = fn
+    incs[base] = lst
+
+
+def has_include (f, name):
+ try:
+ return name in [b for (a,b) in incs[f]]
+ except KeyError:
+ return False
+
+# Parse every input file, then report redundant direct #includes.
+# Fixed: skip argv[0] -- the script itself is not an input file.
+for a in sys.argv[1:]:
+    parse_file (a)
+
+# emacs compilation-mode marker so the output is clickable
+print '-*-compilation-*-'
+for (f, lst) in incs.items ():
+    for (n, inc) in lst:
+        for (n2, inc2) in lst:
+            # inc is already pulled in transitively via inc2
+            if has_include (inc2, inc):
+                print "%s:%d: already have %s from %s" % (full_paths[f], n,
+                                                          inc, inc2)
+                break
+
+
+
--- /dev/null
+#!/usr/bin/env python
+
+# fixcc -- nitpick lily's c++ code
+
+# TODO
+# * maintainable rules: regexp's using whitespace (?x) and match names
+# <identifier>)
+# * trailing `*' vs. function definition
+# * do not break/change indentation of fixcc-clean files
+# * check lexer, parser
+# * rewrite in elisp, add to cc-mode
+# * using regexes is broken by design
+# * ?
+# * profit
+
+import __main__
+import getopt
+import os
+import re
+import string
+import sys
+import time
+
+COMMENT = 'COMMENT'
+STRING = 'STRING'
+GLOBAL_CXX = 'GC++'
+CXX = 'C++'
+verbose_p = 0
+indent_p = 0
+
+rules = {
+ GLOBAL_CXX:
+ [
+ # delete gratuitous block
+ ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
+ '\n\\2;'),
+ ],
+ CXX:
+ [
+ # space before parenthesis open
+ ('([^\( \]])[ \t]*\(', '\\1 ('),
+ # space after comma
+ ("\([^'],\)[ \t]*", '\1 '),
+ # delete gratuitous block
+ ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
+ '\n\\2;'),
+ # delete inline tabs
+ ('(\w)\t+', '\\1 '),
+ # delete inline double spaces
+ (' *', ' '),
+ # delete space after parenthesis open
+ ('\([ \t]*', '('),
+ # delete space before parenthesis close
+ ('[ \t]*\)', ')'),
+ # delete spaces after prefix
+ ('(--|\+\+)[ \t]*([\w\)])', '\\1\\2'),
+ # delete spaces before postfix
+ ('([\w\)\]])[ \t]*(--|\+\+)', '\\1\\2'),
+ # delete space after parenthesis close
+ #('\)[ \t]*([^\w])', ')\\1'),
+ # delete space around operator
+ # ('([\w\(\)\]])([ \t]*)(::|\.)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
+ ('([\w\(\)\]])([ \t]*)(\.|->)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
+ # delete space after operator
+ ('(::)([ \t]*)([\w\(\)])', '\\1\\3'),
+ # delete superflous space around operator
+ ('([\w\(\)\]])([ \t]+)(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&|\||\*)([ \t]+)([\w\(\)])', '\\1 \\3 \\5'),
+ # space around operator1
+ ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
+ # space around operator2
+ ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) ([^\w\s])', '\\1 \\2 \\3'),
+ # space around operator3
+ ('([^\w\s]) (&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|[^-]>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
+ # space around operator4
+ ('([\w\(\)\]]) (\*|/|\+|-) *([-:])', '\\1 \\2 \\3'),
+ # space around +/-; exponent
+ ('([\w\)\]])(\+|-)([_A-Za-z\(])', '\\1 \\2 \\3'),
+ ('([_\dA-Za-df-z\)\]])(\+|-)([\w\(])', '\\1 \\2 \\3'),
+ # trailing operator
+ (' (::|&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&XXX|\||\*XXX)[ \t]*\n([ \t]*)', '\n\\2\\1 '),
+ # pointer
+ ##('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
+ ('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|:|=|\?\)|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
+ #to#('(bool|char|const|delete|int|stream|unsigned|void|([A-Z]\w*)|[,])[ \n\t]*(\*|&)[ \t]*', '\\1 \\3'),
+ # pointer with template
+ ('(( *((bool|char|const|delete|int|stream|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*|\w+::\w+|[,])[ \*&],*)+)>) *(\*|&) *', '\\1 \\5'),
+ #to#('(( *((bool|char|delete|int|stream|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)|[,])[ \*&],*)+)>)[ \t\n]*(\*|&) *', '\\1 \\7'),
+ # unary pointer, minus, not
+ ('(return|=) (\*|&|-|!) ([\w\(])', '\\1 \\2\\3'),
+ # space after `operator'
+ ('(\Woperator) *([^\w\s])', '\\1 \\2'),
+ # dangling brace close
+ ('\n[ \t]*(\n[ \t]*})', '\\1'),
+ # dangling newline
+ ('\n[ \t]*\n[ \t]*\n', '\n\n'),
+ # dangling parenthesis open
+ #('[ \t]*\n[ \t]*\([ \t]*\n', '('),
+ ('\([ \t]*\n', '('),
+ # dangling parenthesis close
+ ('\n[ \t]*\)', ')'),
+ # dangling comma
+ ('\n[ \t]*,', ','),
+ # dangling semicolon
+ ('\n[ \t]*;', ';'),
+ # brace open
+ ('(\w)[ \t]*([^\s]*){([ \t]*\n)', '\\1\\2\n{\n'),
+ # brace open backslash
+ ('(\w[^\n]*){[ \t]*\\\\\n', '\\1\\\n{\\\n'),
+ # brace close
+ ("}[ \t]*([^'\n]*\w[^\n\\\]*)\n", '}\n\\1\n'),
+ # brace close backslash
+ ("}[ \t]*([^'\n]*\w[^\n\\\]*)", '\n}\n\\1'),
+ # delete space after `operator'
+ #('(\Woperator) (\W)', '\\1\\2'),
+ # delete space after case, label
+ ('(\W(case|label) ([\w]+)) :', '\\1:'),
+ # delete space before comma
+ ('[ \t]*,', ','),
+ # delete space before semicolon
+ ('[ \t]*;', ';'),
+ # delete space before eol-backslash
+ ('[ \t]*\\\\\n', '\\\n'),
+ # delete trailing whitespace
+ ('[ \t]*\n', '\n'),
+
+ ## Deuglify code that also gets ugly by rules above.
+ # delete newline after typedef struct
+ ('(typedef struct\s+([\w]*\s){([^}]|{[^}]*})*})\s*\n\s*(\w[\w\d]*;)', '\\1 \\4'),
+ # delete spaces around template brackets
+ #('(dynamic_cast|template|([A-Z]\w*))[ \t]*<[ \t]*(( *(bool|char|int|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)),?)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\3\\8>'),
+ ('(dynamic_cast|template|typedef|\w+::\w+|[A-Z]\w*)[ \t]*<[ \t]*(( *(bool|char|const|int|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*)( *[\*&]?,|[\*&])*)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\2\\6>'),
+ ('(\w+::\w+|[A-Z]\w*) < ((\w+::\w+|[A-Z]\w*)<[A-Z]\w*>) >', '\\1<\\2 >'),
+ ('((if|while)\s+\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\n;'),
+ ('(for\s+\(([^;]*;[^;]*;([^\)]|\([^\)]*\))*)\))\s*;', '\\1\n;'),
+ # do {..} while
+ ('(}\s*while\s*)(\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\\2;'),
+
+ ## Fix code that gets broken by rules above.
+ ##('->\s+\*', '->*'),
+ # delete space before #define x()
+ ('#[ \t]*define (\w*)[ \t]*\(', '#define \\1('),
+ # add space in #define x ()
+ ('#[ \t]*define (\w*)(\(([^\(\)]|\([^\(\)]*\))*\)\\n)',
+ '#define \\1 \\2'),
+ # delete space in #include <>
+ ('#[ \t]*include[ \t]*<[ \t]*([^ \t>]*)[ \t]*(/?)[ \t]*([^ \t>]*)[ \t]*>',
+ '#include <\\1\\2\\3>'),
+ # delete backslash before empty line (emacs' indent region is broken)
+ ('\\\\\n\n', '\n\n'),
+ ],
+
+ COMMENT:
+ [
+ # delete trailing whitespace
+ ('[ \t]*\n', '\n'),
+ # delete empty first lines
+ ('(/\*\n)\n*', '\\1'),
+ # delete empty last lines
+ ('\n*(\n\*/)', '\\1'),
+ ## delete newline after start?
+ #('/(\*)\n', '\\1'),
+ ## delete newline before end?
+ #('\n(\*/)', '\\1'),
+ ],
+ }
+
+# Recognize special sequences in the input.
+#
+# (?P<name>regex) -- Assign result of REGEX to NAME.
+# *? -- Match non-greedily.
+# (?m) -- Multiline regex: Make ^ and $ match at each line.
+# (?s) -- Make the dot match all characters including newline.
+# (?x) -- Ignore whitespace in patterns.
+no_match = 'a\ba'
+snippet_res = {
+ CXX: {
+ 'multiline_comment':
+ r'''(?sx)
+ (?P<match>
+ (?P<code>
+ [ \t]*/\*.*?\*/))''',
+
+ 'singleline_comment':
+ r'''(?mx)
+ ^.*
+ (?P<match>
+ (?P<code>
+ [ \t]*//([ \t][^\n]*|)\n))''',
+
+ 'string':
+ r'''(?x)
+ (?P<match>
+ (?P<code>
+ "([^\"\n](\")*)*"))''',
+
+ 'char':
+ r'''(?x)
+ (?P<match>
+ (?P<code>
+ '([^']+|\')))''',
+
+ 'include':
+ r'''(?x)
+ (?P<match>
+ (?P<code>
+ "#[ \t]*include[ \t]*<[^>]*>''',
+ },
+ }
+
+class Chunk:
+ # Base class for a piece of the input file. Subclasses decide how
+ # the piece is rewritten; the default contributes nothing.
+ def replacement_text (self):
+ return ''
+
+ def filter_text (self):
+ return self.replacement_text ()
+
+class Substring (Chunk):
+ # Plain C++ code between snippets (a [start:end) slice of SOURCE);
+ # every CXX rewrite rule is applied to it, in table order.
+ def __init__ (self, source, start, end):
+ self.source = source
+ self.start = start
+ self.end = end
+
+ def replacement_text (self):
+ # Rule order matters: later rules clean up artifacts produced
+ # by earlier ones (see the 'Deuglify'/'Fix code' sections).
+ s = self.source[self.start:self.end]
+ if verbose_p:
+ sys.stderr.write ('CXX Rules')
+ for i in rules[CXX]:
+ if verbose_p:
+ sys.stderr.write ('.')
+ #sys.stderr.write ('\n\n***********\n')
+ #sys.stderr.write (i[0])
+ #sys.stderr.write ('\n***********\n')
+ #sys.stderr.write ('\n=========>>\n')
+ #sys.stderr.write (s)
+ #sys.stderr.write ('\n<<=========\n')
+ s = re.sub (i[0], i[1], s)
+ if verbose_p:
+ sys.stderr.write ('done\n')
+ return s
+
+
+class Snippet (Chunk):
+ # A matched special region (comment, string, char, include...).
+ # The default behaviour is to reproduce the matched text untouched,
+ # i.e. snippets are protected from the CXX rewrite rules.
+ def __init__ (self, type, match, format):
+ self.type = type
+ self.match = match
+ self.hash = 0
+ self.options = []
+ self.format = format
+
+ def replacement_text (self):
+ return self.match.group ('match')
+
+ def substring (self, s):
+ return self.match.group (s)
+
+ def __repr__ (self):
+ # NOTE: backquote repr is Python 2 only syntax.
+ return `self.__class__` + ' type = ' + self.type
+
+class Multiline_comment (Snippet):
+ def __init__ (self, source, match, format):
+ self.type = type
+ self.match = match
+ self.hash = 0
+ self.options = []
+ self.format = format
+
+ def replacement_text (self):
+ s = self.match.group ('match')
+ if verbose_p:
+ sys.stderr.write ('COMMENT Rules')
+ for i in rules[COMMENT]:
+ if verbose_p:
+ sys.stderr.write ('.')
+ s = re.sub (i[0], i[1], s)
+ return s
+
+snippet_type_to_class = {
+ 'multiline_comment': Multiline_comment,
+# 'string': Multiline_comment,
+# 'include': Include_snippet,
+}
+
+def find_toplevel_snippets (s, types):
+ # Split string S into an alternating list of Substring chunks (plain
+ # code) and Snippet chunks (regions matched by the TYPES patterns of
+ # snippet_res[format]). Earlier match positions are cached in
+ # 'found' and reused so each part of S is scanned at most once per
+ # type; see the comment block below.
+ if verbose_p:
+ sys.stderr.write ('Dissecting')
+
+ res = {}
+ for i in types:
+ res[i] = re.compile (snippet_res[format][i])
+
+ snippets = []
+ index = 0
+ ## found = dict (map (lambda x: (x, None),
+ ## types))
+ ## urg python2.1
+ found = {}
+ map (lambda x, f = found: f.setdefault (x, None),
+ types)
+
+ # We want to search for multiple regexes, without searching
+ # the string multiple times for one regex.
+ # Hence, we use earlier results to limit the string portion
+ # where we search.
+ # Since every part of the string is traversed at most once for
+ # every type of snippet, this is linear.
+
+ while 1:
+ if verbose_p:
+ sys.stderr.write ('.')
+ first = None
+ endex = 1 << 30
+ for type in types:
+ # Re-search only when the cached hit is stale (behind
+ # the current position) or absent.
+ if not found[type] or found[type][0] < index:
+ found[type] = None
+ m = res[type].search (s[index:endex])
+ if not m:
+ continue
+
+ cl = Snippet
+ if snippet_type_to_class.has_key (type):
+ cl = snippet_type_to_class[type]
+ snip = cl (type, m, format)
+ start = index + m.start ('match')
+ found[type] = (start, snip)
+
+ # Track the earliest match across all types.
+ if found[type] \
+ and (not first \
+ or found[type][0] < found[first][0]):
+ first = type
+
+ # FIXME.
+
+ # Limiting the search space is a cute
+ # idea, but this *requires* to search
+ # for possible containing blocks
+ # first, at least as long as we do not
+ # search for the start of blocks, but
+ # always/directly for the entire
+ # @block ... @end block.
+
+ endex = found[first][0]
+
+ if not first:
+ # No snippet left: the rest of S is plain code.
+ snippets.append (Substring (s, index, len (s)))
+ break
+
+ (start, snip) = found[first]
+ snippets.append (Substring (s, index, start))
+ snippets.append (snip)
+ found[first] = None
+ index = start + len (snip.match.group ('match'))
+
+ return snippets
+
+def nitpick_file (outdir, file):
+ # Apply all rewrite rules to FILE. If OUTDIR is set the result is
+ # written there; otherwise FILE is replaced in place and the
+ # original is kept as FILE~. Finally the result is reindented via
+ # emacs when it changed (or always, under --indent).
+ s = open (file).read ()
+
+ for i in rules[GLOBAL_CXX]:
+ s = re.sub (i[0], i[1], s)
+
+ # FIXME: Containing blocks must be first, see
+ # find_toplevel_snippets.
+ # We leave simple strings be part of the code
+ snippet_types = (
+ 'multiline_comment',
+ 'singleline_comment',
+ 'string',
+# 'char',
+ )
+
+ chunks = find_toplevel_snippets (s, snippet_types)
+ #code = filter (lambda x: is_derived_class (x.__class__, Substring),
+ # chunks)
+
+ # Reassemble the file from the filtered chunks.
+ t = string.join (map (lambda x: x.filter_text (), chunks), '')
+ fixt = file
+ if s != t:
+ if not outdir:
+ os.system ('mv %s %s~' % (file, file))
+ else:
+ fixt = os.path.join (outdir,
+ os.path.basename (file))
+ h = open (fixt, "w")
+ h.write (t)
+ h.close ()
+ if s != t or indent_p:
+ indent_file (fixt)
+
+def indent_file (file):
+ emacs = '''emacs\
+ --no-window-system\
+ --batch\
+ --no-site-file\
+ --no-init-file\
+ %(file)s\
+ --eval '(let ((error nil)
+ (version-control nil))
+ (load-library "cc-mode")
+ (c++-mode)
+ (indent-region (point-min) (point-max))
+ (if (buffer-modified-p (current-buffer))
+ (save-buffer)))' ''' % vars ()
+ emacsclient = '''emacsclient\
+ --socket-name=%(socketdir)s/%(socketname)s\
+ --no-wait\
+ --eval '(let ((error nil)
+ (version-control nil))
+ (load-library "cc-mode")
+ (find-file "%(file)s")
+ (c++-mode)
+ (indent-region (point-min) (point-max))
+ (if (buffer-modified-p (current-buffer))
+ (save-buffer)))' ''' \
+ % { 'file': file,
+ 'socketdir' : socketdir,
+ 'socketname' : socketname, }
+ if verbose_p:
+ sys.stderr.write (emacs)
+ sys.stderr.write ('\n')
+ os.system (emacs)
+
+
+def usage ():
+ sys.stdout.write (r'''
+Usage:
+fixcc [OPTION]... FILE...
+
+Options:
+ --help
+ --indent reindent, even if no changes
+ --verbose
+ --test
+
+Typical use with LilyPond:
+
+ fixcc $(find flower kpath-guile lily -name '*cc' -o -name '*hh' | grep -v /out)
+
+This script is licensed under the GNU GPL
+''')
+
+def do_options ():
+ # Parse command-line options into the module globals and return the
+ # list of files to process; exits with usage on --help, after
+ # --test, or when no files were given.
+ global indent_p, outdir, verbose_p
+ (options, files) = getopt.getopt (sys.argv[1:], '',
+ ['help', 'indent', 'outdir=',
+ 'test', 'verbose'])
+ for (o, a) in options:
+ if o == '--help':
+ usage ()
+ sys.exit (0)
+ elif o == '--indent':
+ indent_p = 1
+ elif o == '--outdir':
+ outdir = a
+ elif o == '--verbose':
+ verbose_p = 1
+ elif o == '--test':
+ test ()
+ sys.exit (0)
+ else:
+ # Unreachable guard: 'unimplemented' is undefined on purpose,
+ # so hitting this branch raises NameError.
+ assert unimplemented
+ if not files:
+ usage ()
+ sys.exit (2)
+ return files
+
+
+outdir = 0
+format = CXX
+socketdir = '/tmp/fixcc'
+socketname = 'fixcc%d' % os.getpid ()
+
+def setup_client ():
+ # Fork a background emacs running a named server socket, then wait
+ # until the socket appears. Currently unused (see the commented-out
+ # call in main); emacsclient was meant to speed up reindentation.
+ # NOTE(review): os.unlink raises OSError when the socket does not
+ # exist yet, and os.mkdir of socketdir runs *after* the unlink that
+ # needs the directory -- the ordering looks inverted; confirm before
+ # re-enabling this path.
+ #--no-window-system\
+ #--batch\
+ os.unlink (os.path.join (socketdir, socketname))
+ os.mkdir (socketdir, 0700)
+ emacs='''emacs\
+ --no-site-file\
+ --no-init-file\
+ --eval '(let ((error nil)
+ (version-control nil))
+ (load-library "server")
+ (setq server-socket-dir "%(socketdir)s")
+ (setq server-name "%(socketname)s")
+ (server-start)
+ (while t) (sleep 1000))' ''' \
+ % { 'socketdir' : socketdir,
+ 'socketname' : socketname, }
+
+ if not os.fork ():
+ os.system (emacs)
+ sys.exit (0)
+ while not os.path.exists (os.path.join (socketdir, socketname)):
+ time.sleep (1)
+
+def main ():
+ # Entry point: parse options, ensure the output directory exists,
+ # and nitpick every file named on the command line.
+ #emacsclient should be faster, but this does not work yet
+ #setup_client ()
+ files = do_options ()
+ if outdir and not os.path.isdir (outdir):
+ os.makedirs (outdir)
+ for i in files:
+ sys.stderr.write ('%s...\n' % i)
+ nitpick_file (outdir, i)
+
+
+## TODO: make this compilable and check with g++
+TEST = '''
+#include <libio.h>
+#include <map>
+class
+ostream ;
+
+class Foo {
+public: static char* foo ();
+std::map<char*,int>* bar (char, char) { return 0; }
+};
+typedef struct
+{
+ Foo **bar;
+} String;
+
+ostream &
+operator << (ostream & os, String d);
+
+typedef struct _t_ligature
+{
+ char *succ, *lig;
+ struct _t_ligature * next;
+} AFM_Ligature;
+
+typedef std::map < AFM_Ligature const *, int > Bar;
+
+ /**
+ (c) 1997--2008 Han-Wen Nienhuys <hanwen@cs.uu.nl>
+ */
+
+/* ||
+* vv
+* !OK OK
+*/
+/* ||
+ vv
+ !OK OK
+*/
+char *
+Foo:: foo ()
+{
+int
+i
+;
+ char* a= &++ i ;
+ a [*++ a] = (char*) foe (*i, &bar) *
+ 2;
+ int operator double ();
+ std::map<char*,int> y =*bar(-*a ,*b);
+ Interval_t<T> & operator*= (T r);
+ Foo<T>*c;
+ int compare (Pqueue_ent < K, T > const& e1, Pqueue_ent < K,T> *e2);
+ delete *p;
+ if (abs (f)*2 > abs (d) *FUDGE)
+ ;
+ while (0);
+ for (; i<x foo(); foo>bar);
+ for (; *p && > y;
+ foo > bar)
+;
+ do {
+ ;;;
+ }
+ while (foe);
+
+ squiggle. extent;
+ 1 && * unsmob_moment (lf);
+ line_spanner_ = make_spanner ("DynamicLineSpanner", rq ? rq->*self_scm
+(): SCM_EOL);
+ case foo: k;
+
+ if (0) {a=b;} else {
+ c=d;
+ }
+
+ cookie_io_functions_t Memory_out_stream::functions_ = {
+ Memory_out_stream::reader,
+ ...
+ };
+
+ int compare (Array < Pitch> *, Array < Pitch> *);
+ original_ = (Grob *) & s;
+ Drul_array< Link_array<Grob> > o;
+}
+
+ header_.char_info_pos = (6 + header_length) * 4;
+ return ly_bool2scm (*ma < * mb);
+
+ 1 *::sign(2);
+
+ (shift) *-d;
+
+ a = 0 ? *x : *y;
+
+a = "foo() 2,2,4";
+{
+ if (!span_)
+ {
+ span_ = make_spanner ("StaffSymbol", SCM_EOL);
+ }
+}
+{
+ if (!span_)
+ {
+ span_ = make_spanner (StaffSymbol, SCM_EOL);
+ }
+}
+'''
+
+def test ():
+ test_file = 'fixcc.cc'
+ open (test_file, 'w').write (TEST)
+ nitpick_file (outdir, test_file)
+ sys.stdout.write (open (test_file).read ())
+
+if __name__ == '__main__':
+ main ()
+
--- /dev/null
+#!/usr/bin/env python
+
+import sys
+import os
+import glob
+import re
+
+USAGE = ''' Usage: makelsr.py LSR_SNIPPETS_DIR
+This script must be run from top of the source tree;
+it updates snippets input/lsr with snippets in input/new or LSR_SNIPPETS_DIR.
+'''
+
+LY_HEADER_LSR = '''%% Do not edit this file; it is auto-generated from LSR http://lsr.dsi.unimi.it
+%% This file is in the public domain.
+'''
+
+LY_HEADER_NEW = '''%% Do not edit this file; it is auto-generated from input/new
+%% This file is in the public domain.
+'''
+
+DEST = os.path.join ('input', 'lsr')
+NEW_LYS = os.path.join ('input', 'new')
+TEXIDOCS = os.path.join ('input', 'texidocs')
+
+TAGS = []
+# NR 1
+TAGS.extend (['pitches', 'rhythms', 'expressive-marks',
+'repeats', 'simultaneous-notes', 'staff-notation',
+'editorial-annotations', 'text'])
+# NR 2
+TAGS.extend (['vocal-music', 'chords', 'keyboards',
+'percussion', 'fretted-strings', 'unfretted-strings',
+'ancient-notation', 'winds', 'world-music'
+])
+
+# other
+TAGS.extend (['contexts-and-engravers', 'tweaks-and-overrides',
+'paper-and-layout', 'breaks', 'spacing', 'midi', 'titles', 'template'])
+
+def exit_with_usage (n=0):
+ # Print the usage text to stderr and exit with status N.
+ sys.stderr.write (USAGE)
+ sys.exit (n)
+
+try:
+ in_dir = sys.argv[1]
+except:
+ exit_with_usage (2)
+
+if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)):
+ exit_with_usage (3)
+
+unsafe = []
+unconverted = []
+notags_files = []
+
+# mark the section that will be printed verbatim by lilypond-book
+end_header_re = re.compile ('(\\header {.+?doctitle = ".+?})\n', re.M | re.S)
+
+def mark_verbatim_section (ly_code):
+ # Append the '% begin verbatim' marker after the first \header block
+ # (up to doctitle), so lilypond-book prints the rest verbatim.
+ return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1)
+
+# '% LSR' comments are to be stripped
+lsr_comment_re = re.compile (r'\s*%+\s*LSR.*')
+
+begin_header_re = re.compile (r'\\header\s*{', re.M)
+
+# add tags to ly files from LSR
+def add_tags (ly_code, tags):
+ # Insert an 'lsrtags = "..."' line right after the first '\header {'
+ # of LY_CODE; TAGS is the already comma-joined tag string.
+ return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n', ly_code, 1)
+
+def copy_ly (srcdir, name, tags):
+ # Copy snippet NAME from SRCDIR into DEST, adding the appropriate
+ # auto-generated header, tags and translated texidocs, then run it
+ # through convert-ly and a -dsafe lilypond compile; failures are
+ # accumulated in the module-level 'unconverted'/'unsafe' lists.
+ global unsafe
+ global unconverted
+ dest = os.path.join (DEST, name)
+ tags = ', '.join (tags)
+ s = open (os.path.join (srcdir, name)).read ()
+
+ texidoc_translations_path = os.path.join (TEXIDOCS,
+ os.path.splitext (name)[0] + '.texidoc')
+ if os.path.exists (texidoc_translations_path):
+ texidoc_translations = open (texidoc_translations_path).read ()
+ # Since we want to insert the translations verbatim using a
+ # regexp, \\ is understood as ONE escaped backslash. So we have
+ # to escape those backslashes once more...
+ texidoc_translations = texidoc_translations.replace ('\\', '\\\\')
+ s = begin_header_re.sub ('\\g<0>\n' + texidoc_translations, s, 1)
+
+ # Snippets from the LSR tarball get the LSR header plus tags;
+ # snippets from input/new already carry tags, only the header.
+ if in_dir in srcdir:
+ s = LY_HEADER_LSR + add_tags (s, tags)
+ else:
+ s = LY_HEADER_NEW + s
+
+ s = mark_verbatim_section (s)
+ s = lsr_comment_re.sub ('', s)
+ open (dest, 'w').write (s)
+
+ e = os.system ("convert-ly -e '%s'" % dest)
+ if e:
+ unconverted.append (dest)
+ if os.path.exists (dest + '~'):
+ os.remove (dest + '~')
+ # -V seems to make unsafe snippets fail nicer/sooner
+ e = os.system ("lilypond -V -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" % dest)
+ if e:
+ unsafe.append (dest)
+
+def read_source_with_dirs (src):
+ # Scan SRC, where each tag is a subdirectory of .ly files. Returns
+ # (s, l): s maps basename -> (srcdir, [tags]); l maps tag -> set of
+ # basenames. A snippet present under several tag dirs accumulates
+ # all of its tags (the recorded srcdir is the first one seen).
+ s = {}
+ l = {}
+ for tag in TAGS:
+ srcdir = os.path.join (src, tag)
+ l[tag] = set (map (os.path.basename, glob.glob (os.path.join (srcdir, '*.ly'))))
+ for f in l[tag]:
+ if f in s:
+ s[f][1].append (tag)
+ else:
+ s[f] = (srcdir, [tag])
+ return s, l
+
+
+tags_re = re.compile ('lsrtags\\s*=\\s*"(.+?)"')
+
+def read_source (src):
+ # Scan the flat directory SRC where tags are embedded in each file
+ # as an 'lsrtags = "..."' line. Returns the same (s, l) shape as
+ # read_source_with_dirs; files without an lsrtags line are appended
+ # to the module-level notags_files list.
+ s = {}
+ l = dict ([(tag, set()) for tag in TAGS])
+ for f in glob.glob (os.path.join (src, '*.ly')):
+ basename = os.path.basename (f)
+ m = tags_re.search (open (f, 'r').read ())
+ if m:
+ file_tags = [tag.strip() for tag in m.group (1). split(',')]
+ s[basename] = (src, file_tags)
+ # Only tags present in TAGS are indexed; unknown tags are
+ # silently ignored here.
+ [l[tag].add (basename) for tag in file_tags if tag in TAGS]
+ else:
+ notags_files.append (f)
+ return s, l
+
+
+def dump_file_list (file, list):
+ f = open (file, 'w')
+ f.write ('\n'.join (list) + '\n')
+
+## clean out existing lys and generated files
+map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
+ glob.glob (os.path.join (DEST, '*.snippet-list')))
+
+# read LSR source where tags are defined by subdirs
+snippets, tag_lists = read_source_with_dirs (in_dir)
+# read input/new where tags are directly
+s, l = read_source (NEW_LYS)
+snippets.update (s)
+for t in TAGS:
+ tag_lists[t].update (l[t])
+
+for (name, (srcdir, tags)) in snippets.items ():
+ copy_ly (srcdir, name, tags)
+
+for (tag, file_set) in tag_lists.items ():
+ dump_file_list (os.path.join (DEST, tag + '.snippet-list'), sorted(file_set))
+
+if unconverted:
+ sys.stderr.write ('These files could not be converted successfully by convert-ly:\n')
+ sys.stderr.write ('\n'.join (unconverted) + '\n\n')
+
+if notags_files:
+ sys.stderr.write ('No tags could be found in these files:\n')
+ sys.stderr.write ('\n'.join (notags_files) + '\n\n')
+
+dump_file_list ('lsr-unsafe.txt', unsafe)
+sys.stderr.write ('''
+
+Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY!
+ git add input/lsr/*.ly
+ xargs git-diff HEAD < lsr-unsafe.txt
+
+''')
+
--- /dev/null
+#!/usr/bin/env python
+
+notes = "CDEFGAB"
+alterations = [-1, 0, 1]
+
<imports>
</imports>
+def print_note (octave, note, alteration):
+ # Emit one MusicXML quarter <note> element for the given pitch.
+ # NOTE is an index into the module-level 'notes' string; ALTERATION
+ # emits an <alter> child only when non-zero ('<>' is Python 2's
+ # inequality operator).
+ print " <note>\n <pitch>\n <step>%s</step>" % notes[note]
+ if alteration <> 0:
+ print " <alter>%s</alter>" % alteration
+ print " <octave>%s</octave>\n </pitch>\n <duration>1</duration>\n <voice>1</voice>\n <type>quarter</type>\n </note>" % octave
+
+
+print """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
+ "http://www.musicxml.org/dtds/partwise.dtd">
+<score-partwise>
+ <movement-title>Various piches and interval sizes</movement-title>
+ <part-list>
+ <score-part id="P1">
+ <part-name>MusicXML Part</part-name>
+ </score-part>
+ </part-list>
+ <!--=========================================================-->
+ <part id="P1">
+ <measure number="1">
+ <attributes>
+ <divisions>1</divisions>
+ <key>
+ <fifths>0</fifths>
+ <mode>major</mode>
+ </key>
+ <time symbol="common">
+ <beats>2</beats>
+ <beat-type>4</beat-type>
+ </time>
+ <clef>
+ <sign>G</sign>
+ <line>2</line>
+ </clef>
+ </attributes>
+"""
+
+start_octave = 5
+
+for octave in (start_octave, start_octave+1):
+ for note in (0,1,2,3,4,5,6):
+ for alteration in alterations:
+ if octave == start_octave and note == 0 and alteration == -1:
+ continue
+ print_note (octave, note, alteration)
+# if octave == start_octave and note == 0 and alteration == 0:
+# continue
+ print_note (start_octave-(octave-start_octave)-(1-(7-note)/7), (7-note)%7, -alteration)
+
+print """ </measure>
+ </part>
+</score-partwise>
+"""
--- /dev/null
+#!/usr/bin/env python
+
+notes = "CDEFGAB"
+alterations = [-1, 0, 1]
+
+def print_measure (nr, fifth, mode, atts1 = "", atts = "", final = ""):
+ # Emit one MusicXML measure numbered NR containing a single half
+ # note, with a key signature of FIFTH fifths in MODE. ATTS1/ATTS
+ # are raw XML spliced before/after the <key> element (divisions,
+ # time, clef); FINAL is raw XML appended before </measure> (e.g. a
+ # final barline).
+ print """ <measure number="%s">
+ <attributes>
+%s <key>
+ <fifths>%s</fifths>
+ <mode>%s</mode>
+ </key>
+%s </attributes>
+ <note>
+ <pitch>
+ <step>C</step>
+ <octave>4</octave>
+ </pitch>
+ <duration>2</duration>
+ <voice>1</voice>
+ <type>half</type>
+ </note>
+%s </measure>""" % (nr, atts1, fifth, mode, atts, final)
+
+first_div = """ <divisions>1</divisions>
+"""
+first_atts = """ <time symbol="common">
+ <beats>2</beats>
+ <beat-type>4</beat-type>
+ </time>
+ <clef>
+ <sign>G</sign>
+ <line>2</line>
+ </clef>
+"""
+
+final_barline = """ <barline location="right">
+ <bar-style>light-heavy</bar-style>
+ </barline>
+"""
+
+print """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
+ "http://www.musicxml.org/dtds/partwise.dtd">
+<score-partwise>
+ <movement-title>Different Key signatures</movement-title>
+ <identification>
+ <miscellaneous>
+ <miscellaneous-field name="description">Various key signature: from 11
+ flats to 11 sharps (each one first one measure in major, then one
+ measure in minor)</miscellaneous-field>
+ </miscellaneous>
+ </identification>
+ <part-list>
+ <score-part id="P1">
+ <part-name>MusicXML Part</part-name>
+ </score-part>
+ </part-list>
+ <!--=========================================================-->
+ <part id="P1">"""
+
+max_range = 11
+measure = 0
+for fifth in range(-max_range, max_range+1):
+ measure += 1
+ if fifth == -max_range:
+ print_measure (measure, fifth, "major", first_div, first_atts)
+ else:
+ print_measure (measure, fifth, "major")
+ measure += 1
+ if fifth == max_range:
+ print_measure (measure, fifth, "minor", "", "", final_barline)
+ else:
+ print_measure (measure, fifth, "minor")
+
+
+print """ </part>
+</score-partwise>"""
--- /dev/null
+#!/usr/bin/env python
+
+notes = "CDEFGAB"
+alterations = [-1, 0, 1]
+
+dot_xml = """ <dot/>
+"""
+tie_xml = """ <tie type="%s"/>
+"""
+tie_notation_xml = """ <notations><tied type="%s"/></notations>
+"""
+
+
+def generate_note (duration, end_tie = False):
+ # Return MusicXML <note> markup (as a string) filling DURATION
+ # eighth-note units: the largest fitting plain note type, up to two
+ # augmentation dots, and -- when a remainder is left -- a recursive
+ # tied continuation note. END_TIE marks this note as the stop end
+ # of a tie started by the caller.
+ if duration < 2:
+ (notetype, dur) = ("8th", 1)
+ elif duration < 4:
+ (notetype, dur) = ("quarter", 2)
+ elif duration < 8:
+ (notetype, dur) = ("half", 4)
+ else:
+ (notetype, dur) = ("whole", 8)
+ # Try to absorb the remainder with one or two dots (each dot adds
+ # half, then a quarter, of the base duration).
+ dur_processed = dur
+ dot = ""
+ if (duration - dur_processed >= dur/2):
+ dot = dot_xml
+ dur_processed += dur/2
+ if (duration - dur_processed >= max(dur/4, 1)):
+ dot += dot_xml
+ dur_processed += dur/4
+ tie = ""
+ tie_notation = ""
+ if end_tie:
+ tie += tie_xml % "stop"
+ tie_notation += tie_notation_xml % "stop"
+ # Whatever still remains becomes a tied second note.
+ second_note = None
+ if duration - dur_processed > 0:
+ second_note = generate_note (duration-dur_processed, True)
+ tie += tie_xml % "start"
+ tie_notation += tie_notation_xml % "start"
+ note = """ <note>
+ <pitch>
+ <step>C</step>
+ <octave>5</octave>
+ </pitch>
+ <duration>%s</duration>
+%s <voice>1</voice>
+ <type>%s</type>
+%s%s </note>""" % (dur_processed, tie, notetype, dot, tie_notation)
+ if second_note:
+ return "%s\n%s" % (note, second_note)
+ else:
+ return note
+
+def print_measure (nr, beats, type, params = "", attr = "", attr2 = "", barline = ""):
+ # Emit one MusicXML measure numbered NR with a BEATS/TYPE time
+ # signature, filled by a single (possibly dotted/tied) note built by
+ # generate_note. PARAMS is spliced into the <time> tag itself
+ # (e.g. ' symbol="common"'); ATTR/ATTR2 are raw XML before/after the
+ # <time> element; BARLINE is appended before </measure>.
+ duration = 8*beats/type
+ note = generate_note (duration)
+
+ print """ <measure number="%s">
+ <attributes>
+%s <time%s>
+ <beats>%s</beats>
+ <beat-type>%s</beat-type>
+ </time>
+%s </attributes>
+%s
+%s </measure>""" % (nr, attr, params, beats, type, attr2, note, barline)
+
+first_key = """ <divisions>2</divisions>
+ <key>
+ <fifths>0</fifths>
+ <mode>major</mode>
+ </key>
+"""
+first_clef = """ <clef>
+ <sign>G</sign>
+ <line>2</line>
+ </clef>
+"""
+
+final_barline = """ <barline location="right">
+ <bar-style>light-heavy</bar-style>
+ </barline>
+"""
+
+print """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
+ "http://www.musicxml.org/dtds/partwise.dtd">
+<score-partwise>
+ <identification>
+ <miscellaneous>
+ <miscellaneous-field name="description">Various time signatures: 2/2
+ (alla breve), 4/4 (C), 2/2, 3/2, 2/4, 3/4, 4/4, 5/4, 3/8, 6/8,
+ 12/8</miscellaneous-field>
+ </miscellaneous>
+ </identification>
+ <part-list>
+ <score-part id="P1">
+ <part-name>MusicXML Part</part-name>
+ </score-part>
+ </part-list>
+ <!--=========================================================-->
+ <part id="P1">"""
+
+measure = 1
+
+print_measure (measure, 2, 2, " symbol=\"common\"", first_key, first_clef)
+measure += 1
+
+print_measure (measure, 4, 4, " symbol=\"common\"")
+measure += 1
+
+print_measure (measure, 2, 2)
+measure += 1
+
+print_measure (measure, 3, 2)
+measure += 1
+
+print_measure (measure, 2, 4)
+measure += 1
+
+print_measure (measure, 3, 4)
+measure += 1
+
+print_measure (measure, 4, 4)
+measure += 1
+
+print_measure (measure, 5, 4)
+measure += 1
+
+print_measure (measure, 3, 8)
+measure += 1
+
+print_measure (measure, 6, 8)
+measure += 1
+
+print_measure (measure, 12, 8, "", "", "", final_barline)
+measure += 1
+
+print """ </part>
+</score-partwise>"""
--- /dev/null
+Open($1);
+MergeKern($2)
+
+
+# The AFM files of `New Century Schoolbook' family as distributed within the
+# urw-fonts-1.0.7pre41.tar.bz2 archive contain a bunch of ligatures which
+# shouldn't be active by default:
+#
+# T + M -> trademark
+# N + o -> afii61352
+# i + j -> ij
+# I + J -> IJ
+#
+# This font bundle is shipped by Fedora Core 6 and other GNU/Linux
+# distributions; we simply remove those ligatures.
+
+SelectIf("trademark", "trademark", \
+ "afii61352", "afii61352", \
+ "ij", "ij", \
+ "IJ", "IJ");
+if (Strtol($version) < 20070501)
+ RemoveATT("Ligature", "*", "*");
+else
+ RemovePosSub("*");
+endif
+
+Generate($3 + $fontname + ".otf");
+
+# EOF
--- /dev/null
+#!/usr/bin/env python
+import os
+import sys
+
+for i in sys.argv[1:]:
+ print os.path.realpath (i)
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Temporary script that helps translated docs sources conversion
+# for texi2html processing
+
+# USAGE: tely-gettext.py PYTHON-DIR LOCALEDIR LANG FILES
+
+print "tely-gettext.py"
+
+import sys
+import re
+import os
+import gettext
+
+if len (sys.argv) > 3:
+ buildscript_dir, localedir, lang = sys.argv[1:4]
+else:
+ print """USAGE: tely-gettext.py PYTHON-DIR LOCALEDIR LANG FILES
+ For example scripts/aux/tely-gettext.py python/out Documentation/po/out-www de Documentation/de/user/*.tely"""
+ sys.exit (1)
+
+sys.path.append (buildscript_dir)
+import langdefs
+
+double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
+t = gettext.translation('lilypond-doc', localedir, [lang])
+_doc = t.gettext
+
+include_re = re.compile (r'@include (.*?)$', re.M)
+whitespaces = re.compile (r'\s+')
+ref_re = re.compile (r'(?ms)@(ruser|rprogram|ref|rlearning)\{(.*?)\}')
+node_section_re = re.compile (r'@node (.*?)\n@((?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) (.*?)\n')
+menu_entry_re = re.compile (r'\* (.*?)::')
+
+def ref_gettext (m):
+ # re.sub callback: translate the title inside an @ruser/@rprogram/
+ # @ref/@rlearning cross-reference, normalizing inner whitespace
+ # first so it matches the PO msgid.
+ r = whitespaces.sub (' ', m.group (2))
+ return '@' + m.group (1) + '{' + _doc (r) + '}'
+
+def node_gettext (m):
+ # re.sub callback: translate both the @node name and the following
+ # sectioning title, and append an @translationof line recording the
+ # original (untranslated) node name.
+ return '@node ' + _doc (m.group (1)) + '\n@' + \
+ m.group (2) + ' ' + _doc (m.group (3)) + \
+ '\n@translationof ' + m.group (1) + '\n'
+
+def menu_entry_gettext (m):
+ # re.sub callback: translate a '* Entry::' menu item.
+ return '* ' + _doc (m.group (1)) + '::'
+
+def process_file (filename):
+ # Rewrite FILENAME in place: translate node/section titles, cross
+ # references and menu entries through gettext, comment out skeleton
+ # markers, then recurse into every @include found in the same
+ # directory.
+ print "Processing %s" % filename
+ f = open (filename, 'r')
+ page = f.read ()
+ f.close()
+ page = node_section_re.sub (node_gettext, page)
+ page = ref_re.sub (ref_gettext, page)
+ page = menu_entry_re.sub (menu_entry_gettext, page)
+ page = page.replace ("""-- SKELETON FILE --
+When you actually translate this file, please remove these lines as
+well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", """@c -- SKELETON FILE --""")
+ page = page.replace ('UNTRANSLATED NODE: IGNORE ME', "@c UNTRANSLATED NODE: IGNORE ME")
+ # Collect @include targets (whitespace stripped) before writing back.
+ includes = [whitespaces.sub ('', f) for f in include_re.findall (page)]
+ f = open (filename, 'w')
+ f.write (page)
+ f.close ()
+ dir = os.path.dirname (filename)
+ for file in includes:
+ p = os.path.join (dir, file)
+ if os.path.exists (p):
+ process_file (p)
+
+for filename in sys.argv[4:]:
+ process_file (filename)
--- /dev/null
+#!/usr/bin/env python
+# texi-langutils.py
+
+# WARNING: this script can't find files included in a different directory
+
+import sys
+import re
+import getopt
+import os
+
+import langdefs
+
+def read_pipe (command):
+ # Run COMMAND through a pipe, echoing it first, and return its
+ # stdout. A non-zero exit prints a warning but the (possibly
+ # partial) output is still returned.
+ print command
+ pipe = os.popen (command)
+ output = pipe.read ()
+ if pipe.close ():
+ print "pipe failed: %(command)s" % locals ()
+ return output
+
+
+# Command-line parsing and the blurbs/regexes used by process_texi below.
+# Short options: -n -o FILE -d DIR -b BLURB -i BLURB -l ISOLANG;
+# long options: --skeleton, --gettext (see inline comments).
+optlist, texi_files = getopt.getopt(sys.argv[1:],'no:d:b:i:l:',['skeleton', 'gettext'])
+process_includes = not ('-n', '') in optlist # -n don't process @include's in texinfo files
+
+make_gettext = ('--gettext', '') in optlist # --gettext generate a node list from a Texinfo source
+make_skeleton = ('--skeleton', '') in optlist # --skeleton extract the node tree from a Texinfo source
+
+# Default PO template name; overridden by -o.
+output_file = 'doc.pot'
+
+# @untranslated should be defined as a macro in Texinfo source
+node_blurb = '''@untranslated
+'''
+doclang = ''
+# Committish of HEAD, recorded in each generated skeleton so translators
+# can tell which revision they translated (uses dashed git-rev-parse form).
+head_committish = read_pipe ('git-rev-parse HEAD')
+intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*-
+@c This file is part of %(topfile)s
+@ignore
+ Translation of GIT committish: %(head_committish)s
+ When revising a translation, copy the HEAD committish of the
+ version that you are working on. See TRANSLATION for details.
+@end ignore
+'''
+
+end_blurb = """
+@c -- SKELETON FILE --
+"""
+
+for x in optlist:
+ if x[0] == '-o': # -o NAME set PO output file name to NAME
+ output_file = x[1]
+ elif x[0] == '-d': # -d DIR set working directory to DIR
+ os.chdir (x[1])
+ elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB
+ node_blurb = x[1]
+ elif x[0] == '-i': # -i BLURB set blurb written at beginning of each file to BLURB
+ intro_blurb = x[1]
+ elif x[0] == '-l': # -l ISOLANG set documentlanguage to ISOLANG
+ doclang = '; documentlanguage: ' + x[1]
+
+# Matches menu entries, @include, @menu/@end menu, @node, sectioning
+# commands and @rglos{...} -- used when writing skeletons.
+texinfo_with_menus_re = re.compile (r"^(\*) +([^:\n]+)::.*?$|^@(include|menu|end menu|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.*?)$|@(rglos){(.+?)}", re.M)
+
+# Same, without menu entries -- used when only generating the node list.
+texinfo_re = re.compile (r"^@(include|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.+?)$|@(rglos){(.+?)}", re.M)
+
+# LilyPond source fragments worth translating: variable assignments,
+# % comments, and named context ids in \new/\context ... = "name".
+ly_string_re = re.compile (r'^([a-zA-Z]+)[\t ]*=|%+[\t ]*(.*)$|\\(?:new|context)\s+(?:[a-zA-Z]*?(?:Staff(?:Group)?|Voice|FiguredBass|FretBoards|Names|Devnull))\s+=\s+"?([a-zA-Z]+)"?\s+')
+lsr_verbatim_ly_re = re.compile (r'% begin verbatim$')
+texinfo_verbatim_ly_re = re.compile (r'^@lilypond\[.*?verbatim')
+
+# Process one Texinfo (or .ly) file: optionally write translatable strings
+# (as pseudo-Python `_(r"...")` lines for xgettext) to output_file, and/or
+# write a skeleton copy of the file into the current directory; then recurse
+# into @include'd files (unless -n was given).
+# NOTE(review): leading whitespace was collapsed in this patch view, so the
+# exact Python nesting below is indicative only -- verify against the
+# applied file before relying on control-flow details.
+def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile, output_file=None, scan_ly=False):
+ try:
+ f = open (texifilename, 'r')
+ texifile = f.read ()
+ f.close ()
+ printedfilename = texifilename.replace ('../','')
+ includes = []
+
+ # process ly var names and comments
+ if output_file and (scan_ly or texifilename.endswith ('.ly')):
+ lines = texifile.splitlines ()
+ i = 0
+ in_verb_ly_block = False
+ # .ly snippets mark verbatim blocks differently from Texinfo docs
+ if texifilename.endswith ('.ly'):
+ verbatim_ly_re = lsr_verbatim_ly_re
+ else:
+ verbatim_ly_re = texinfo_verbatim_ly_re
+ for i in range (len (lines)):
+ if verbatim_ly_re.search (lines[i]):
+ in_verb_ly_block = True
+ elif lines[i].startswith ('@end lilypond'):
+ in_verb_ly_block = False
+ elif in_verb_ly_block:
+ # Emit each match as a gettext-style marked string with a
+ # file:line comment so xgettext records the location.
+ for (var, comment, context_id) in ly_string_re.findall (lines[i]):
+ if var:
+ output_file.write ('# ' + printedfilename + ':' + \
+ str (i + 1) + ' (variable)\n_(r"' + var + '")\n')
+ elif comment:
+ output_file.write ('# ' + printedfilename + ':' + \
+ str (i + 1) + ' (comment)\n_(r"' + \
+ comment.replace ('"', '\\"') + '")\n')
+ elif context_id:
+ output_file.write ('# ' + printedfilename + ':' + \
+ str (i + 1) + ' (context id)\n_(r"' + \
+ context_id + '")\n')
+
+ # process Texinfo node names and section titles
+ if write_skeleton:
+ # Skeleton is written to the basename in the current directory
+ # (script chdir'd via -d); the intro blurb is %-formatted
+ # against globals+locals (topfile, doclang, head_committish).
+ g = open (os.path.basename (texifilename), 'w')
+ subst = globals ()
+ subst.update (locals ())
+ g.write (i_blurb % subst)
+ tutu = texinfo_with_menus_re.findall (texifile)
+ node_trigger = False
+ for item in tutu:
+ if item[0] == '*':
+ g.write ('* ' + item[1] + '::\n')
+ elif output_file and item[4] == 'rglos':
+ output_file.write ('_(r"' + item[5] + '") # @rglos in ' + printedfilename + '\n')
+ elif item[2] == 'menu':
+ g.write ('@menu\n')
+ elif item[2] == 'end menu':
+ g.write ('@end menu\n\n')
+ else:
+ g.write ('@' + item[2] + ' ' + item[3] + '\n')
+ # after a @node line, write the @untranslated blurb
+ if node_trigger:
+ g.write (n_blurb)
+ node_trigger = False
+ elif item[2] == 'include':
+ includes.append (item[3])
+ else:
+ if output_file:
+ output_file.write ('# @' + item[2] + ' in ' + \
+ printedfilename + '\n_(r"' + item[3].strip () + '")\n')
+ if item[2] == 'node':
+ node_trigger = True
+ g.write (end_blurb)
+ g.close ()
+
+ elif output_file:
+ # No skeleton requested: only collect translatable titles/nodes.
+ toto = texinfo_re.findall (texifile)
+ for item in toto:
+ if item[0] == 'include':
+ includes.append(item[1])
+ elif item[2] == 'rglos':
+ output_file.write ('# @rglos in ' + printedfilename + '\n_(r"' + item[3] + '")\n')
+ else:
+ output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip () + '")\n')
+
+ # Recurse into included files, resolved relative to this file's dir.
+ if process_includes:
+ dir = os.path.dirname (texifilename)
+ for item in includes:
+ process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb, write_skeleton, topfile, output_file, scan_ly)
+ except IOError, (errno, strerror):
+ sys.stderr.write ("I/O error(%s): %s: %s\n" % (errno, texifilename, strerror))
+
+
+# Main flow: pad the blurbs, then either build a gettext node list (piped
+# through xgettext into the PO template) or just write skeletons.
+if intro_blurb != '':
+ intro_blurb += '\n\n'
+if node_blurb != '':
+ node_blurb = '\n' + node_blurb + '\n\n'
+if make_gettext:
+ node_list_filename = 'node_list'
+ node_list = open (node_list_filename, 'w')
+ node_list.write ('# -*- coding: utf-8 -*-\n')
+ for texi_file in texi_files:
+ # Ugly: scan ly comments and variable names only in English doco
+ is_english_doc = 'Documentation/user' in texi_file
+ process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
+ os.path.basename (texi_file), node_list,
+ scan_ly=is_english_doc)
+ # Fixed navigation strings emitted by texi2html/makeinfo, also to be
+ # translated.
+ for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'):
+ node_list.write ('_(r"' + word + '")\n')
+ node_list.close ()
+ # The node list is valid-enough Python for xgettext's Python scanner.
+ os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename)
+else:
+ for texi_file in texi_files:
+ process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
+ os.path.basename (texi_file))
--- /dev/null
+#!/usr/bin/env python
+# texi-skeleton-update.py
+#
+# USAGE: texi-skeleton-update.py DEST-DIR SOURCE-DIR
+# Overwrite skeleton files in DEST-DIR (argv[1]) with freshly generated
+# ones from SOURCE-DIR (argv[2]); copy new files; warn about files that
+# exist only in DEST-DIR. Files already translated (i.e. no longer
+# containing the '-- SKELETON FILE --' marker) are left untouched.
+
+import sys
+import glob
+import os
+import shutil
+
+sys.stderr.write ('texi-skeleton-update.py\n')
+
+# *.ite?? matches .itely and .itexi files.
+orig_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[1] + '/*.ite??')])
+new_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[2] + '/*.ite??')])
+
+for f in new_skeletons:
+ if f in orig_skeletons:
+ g = open (os.path.join (sys.argv[1], f), 'r').read ()
+ # Only overwrite files still marked as untouched skeletons.
+ if '-- SKELETON FILE --' in g:
+ sys.stderr.write ("Updating %s...\n" % f)
+ shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
+ elif f != 'fdl.itexi': # fdl.itexi is never copied (license text)
+ sys.stderr.write ("Copying new file %s...\n" % f)
+ shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
+
+for f in orig_skeletons.difference (new_skeletons):
+ sys.stderr.write ("Warning: outdated skeleton file %s\n" % f)
--- /dev/null
+#!/usr/bin/env python
+
+"""
+USAGE: translations-status.py BUILDSCRIPT-DIR LOCALEDIR
+
+ This script must be run from Documentation/
+
+ Reads template files translations.template.html.in
+and for each LANG in LANGUAGES LANG/translations.template.html.in
+ Writes translations.html.in and for each LANG in LANGUAGES
+translations.LANG.html.in
+ Writes out/translations-status.txt
+ Updates word counts in TRANSLATION
+"""
+
+import sys
+import re
+import string
+import os
+
+import langdefs
+import buildlib
+
+# Write a progress message to stderr (shadows built-in name 'str').
+def progress (str):
+ sys.stderr.write (str + '\n')
+
+progress ("translations-status.py")
+
+# Marks strings for xgettext extraction without translating them here.
+_doc = lambda s: s
+
+# load gettext messages catalogs
+translation = langdefs.translation
+
+
+# Texinfo parsing helpers used throughout this script.
+language_re = re.compile (r'^@documentlanguage (.+)', re.M)
+comments_re = re.compile (r'^@ignore\n(.|\n)*?\n@end ignore$|@c .*?$', re.M)
+space_re = re.compile (r'\s+', re.M)
+lilypond_re = re.compile (r'@lilypond({.*?}|(.|\n)*?\n@end lilypond$)', re.M)
+node_re = re.compile ('^@node .*?$', re.M)
+title_re = re.compile ('^@(top|chapter|(?:sub){0,2}section|' + \
+'(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?) (.*?)$', re.M)
+include_re = re.compile ('^@include (.*?)$', re.M)
+
+# Translation metadata recorded in '@c' comments of each translated file.
+translators_re = re.compile (r'^@c\s+Translators\s*:\s*(.*?)$', re.M | re.I)
+checkers_re = re.compile (r'^@c\s+Translation\s*checkers\s*:\s*(.*?)$',
+ re.M | re.I)
+status_re = re.compile (r'^@c\s+Translation\s*status\s*:\s*(.*?)$', re.M | re.I)
+post_gdp_re = re.compile ('post.GDP', re.I)
+untranslated_node_str = '@untranslated'
+skeleton_str = '-- SKELETON FILE --'
+
+# Display strings and per-status formatting (color, short/long/abbr/vague
+# variants); %(p)d placeholders are filled with the percentage at use time.
+section_titles_string = _doc ('Section titles')
+last_updated_string = _doc (' <p><i>Last updated %s</i></p>\n')
+detailed_status_heads = [_doc ('Translators'), _doc ('Translation checkers'),
+ _doc ('Translated'), _doc ('Up to date'),
+ _doc ('Other info')]
+format_table = {
+ 'not translated': {'color':'d0f0f8', 'short':_doc ('no'), 'abbr':'NT',
+ 'long':_doc ('not translated')},
+ 'partially translated': {'color':'dfef77',
+ 'short':_doc ('partially (%(p)d %%)'),
+ 'abbr':'%(p)d%%',
+ 'long':_doc ('partially translated (%(p)d %%)')},
+ 'fully translated': {'color':'1fff1f', 'short':_doc ('yes'), 'abbr':'FT',
+ 'long': _doc ('translated')},
+ 'up to date': {'short':_doc ('yes'), 'long':_doc ('up to date'),
+ 'abbr':'100%%', 'vague':_doc ('up to date')},
+ 'outdated': {'short':_doc ('partially'), 'abbr':'%(p)d%%',
+ 'vague':_doc ('partially up to date')},
+ 'N/A': {'short':_doc ('N/A'), 'abbr':'N/A', 'color':'d587ff', 'vague':''},
+ 'pre-GDP':_doc ('pre-GDP'),
+ 'post-GDP':_doc ('post-GDP')
+}
+
+texi_level = {
+# (Unnumbered/Numbered/Lettered, level)
+ 'top': ('u', 0),
+ 'unnumbered': ('u', 1),
+ 'unnumberedsec': ('u', 2),
+ 'unnumberedsubsec': ('u', 3),
+ 'chapter': ('n', 1),
+ 'section': ('n', 2),
+ 'subsection': ('n', 3),
+ 'appendix': ('l', 1)
+}
+
+# Letter successor table for appendix numbering: '@'->'A', 'A'->'B', ...
+appendix_number_trans = string.maketrans ('@ABCDEFGHIJKLMNOPQRSTUVWXY',
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
+
+# Hierarchical section counter. Each stack entry is [index, type] where
+# type is 'u' (unnumbered), 'n' (numbered int) or 'l' (lettered, 'A'..).
+# increase((type, level)) advances the counter for a heading of that kind
+# and returns the formatted number prefix (e.g. "1.2 ", or '' at top).
+class SectionNumber (object):
+ def __init__ (self):
+ self.__data = [[0,'u']]
+
+ # Advance the deepest counter: next letter for 'l', +1 for 'n';
+ # 'u' sections are never numbered.
+ def __increase_last_index (self):
+ type = self.__data[-1][1]
+ if type == 'l':
+ self.__data[-1][0] = \
+ self.__data[-1][0].translate (appendix_number_trans)
+ elif type == 'n':
+ self.__data[-1][0] += 1
+
+ # Render the current number, skipping unnumbered levels; trailing space
+ # separates it from the title.
+ def format (self):
+ if self.__data[-1][1] == 'u':
+ return ''
+ return '.'.join ([str (i[0]) for i in self.__data if i[1] != 'u']) + ' '
+
+ def increase (self, (type, level)):
+ # level 0 (@top) resets the whole counter.
+ if level == 0:
+ self.__data = [[0,'u']]
+ while level + 1 < len (self.__data):
+ del self.__data[-1]
+ if level + 1 > len (self.__data):
+ self.__data.append ([0, type])
+ if type == 'l':
+ self.__data[-1][0] = '@' # so first increase yields 'A'
+ if type == self.__data[-1][1]:
+ self.__increase_last_index ()
+ else:
+ # heading kind changed at this depth: restart the counter
+ self.__data[-1] = ([0, type])
+ if type == 'l':
+ self.__data[-1][0] = 'A'
+ elif type == 'n':
+ self.__data[-1][0] = 1
+ return self.format ()
+
+
+# Map a percentage (0-100) to an RGB hex triplet interpolated over three
+# color stops (red-ish -> yellow-ish -> green) in thirds.
+# NOTE(review): hex(x)[2:] yields a single digit for values < 0x10, so the
+# resulting color string may be shorter than 6 chars -- confirm acceptable.
+def percentage_color (percent):
+ p = percent / 100.0
+ if p < 0.33:
+ c = [hex (int (3 * p * b + (1 - 3 * p) * a))[2:]
+ for (a, b) in [(0xff, 0xff), (0x5c, 0xa6), (0x5c, 0x4c)]]
+ elif p < 0.67:
+ c = [hex (int ((3 * p - 1) * b + (2 - 3 * p) * a))[2:]
+ for (a, b) in [(0xff, 0xff), (0xa6, 0xff), (0x4c, 0x3d)]]
+ else:
+ c = [hex (int ((3 * p - 2) * b + 3 * (1 - p) * a))[2:]
+ for (a, b) in [(0xff, 0x1f), (0xff, 0xff), (0x3d, 0x1f)]]
+ return ''.join (c)
+
+
+# Replace the leading word count on the line '<count> <filename>' in text
+# (the TRANSLATION instructions file format) with the new count.
+def update_word_count (text, filename, word_count):
+ return re.sub (r'(?m)^(\d+) *' + filename,
+ str (word_count).ljust (6) + filename,
+ text)
+
+# msgid entries, including multi-line continuation strings.
+po_msgid_re = re.compile (r'^msgid "(.*?)"(?:\n"(.*?)")*', re.M)
+
+# Count whitespace-separated words over all msgid strings of a PO file.
+def po_word_count (po_content):
+ s = ' '.join ([''.join (t) for t in po_msgid_re.findall (po_content)])
+ return len (space_re.split (s))
+
+sgml_tag_re = re.compile (r'<.*?>', re.S)
+
+# Count words of an SGML/HTML document with all tags stripped.
+def sgml_word_count (sgml_doc):
+ s = sgml_tag_re.sub ('', sgml_doc)
+ return len (space_re.split (s))
+
+def tely_word_count (tely_doc):
+ '''
+ Calculate word count of a Texinfo document node by node.
+
+ Take string tely_doc as an argument.
+ Return a list of integers.
+
+ Texinfo comments and @lilypond blocks are not included in word counts.
+ '''
+ tely_doc = comments_re.sub ('', tely_doc)
+ tely_doc = lilypond_re.sub ('', tely_doc)
+ nodes = node_re.split (tely_doc)
+ return [len (space_re.split (n)) for n in nodes]
+
+
+# A parsed Texinfo document: records filename, raw contents, first section
+# title/level, @documentlanguage (if any) and existing @include'd files.
+class TelyDocument (object):
+ def __init__ (self, filename):
+ self.filename = filename
+ self.contents = open (filename).read ()
+
+ ## record title and sectioning level of first Texinfo section
+ m = title_re.search (self.contents)
+ if m:
+ self.title = m.group (2)
+ self.level = texi_level [m.group (1)]
+ else:
+ self.title = 'Untitled'
+ self.level = ('u', 1)
+
+ # self.language is only set when @documentlanguage is present.
+ m = language_re.search (self.contents)
+ if m:
+ self.language = m.group (1)
+
+ # Keep only includes that exist relative to this document's dir.
+ included_files = [os.path.join (os.path.dirname (filename), t)
+ for t in include_re.findall (self.contents)]
+ self.included_files = [p for p in included_files if os.path.exists (p)]
+
+ # Title prefixed with its (freshly increased) section number.
+ def print_title (self, section_number):
+ return section_number.increase (self.level) + self.title
+
+
+# A translation of a master Texinfo document. Computes translation
+# completeness (word-count based) and up-to-dateness (diff based via
+# buildlib.check_translated_doc), and renders these as HTML or text.
+class TranslatedTelyDocument (TelyDocument):
+ def __init__ (self, filename, masterdocument, parent_translation=None):
+ TelyDocument.__init__ (self, filename)
+
+ self.masterdocument = masterdocument
+ # Inherit language from the including document when this file has
+ # no @documentlanguage of its own.
+ if not hasattr (self, 'language') \
+ and hasattr (parent_translation, 'language'):
+ self.language = parent_translation.language
+ if hasattr (self, 'language'):
+ self.translation = translation[self.language]
+ else:
+ self.translation = lambda x: x # fall back to identity
+ self.title = self.translation (self.title)
+
+ ## record authoring information
+ m = translators_re.search (self.contents)
+ if m:
+ self.translators = [n.strip () for n in m.group (1).split (',')]
+ else:
+ self.translators = parent_translation.translators
+ m = checkers_re.search (self.contents)
+ if m:
+ self.checkers = [n.strip () for n in m.group (1).split (',')]
+ elif isinstance (parent_translation, TranslatedTelyDocument):
+ self.checkers = parent_translation.checkers
+ else:
+ self.checkers = []
+
+ ## check whether translation is pre- or post-GDP
+ m = status_re.search (self.contents)
+ if m:
+ self.post_gdp = bool (post_gdp_re.search (m.group (1)))
+ else:
+ self.post_gdp = False
+
+ ## record which parts (nodes) of the file are actually translated
+ self.partially_translated = not skeleton_str in self.contents
+ nodes = node_re.split (self.contents)
+ self.translated_nodes = [not untranslated_node_str in n for n in nodes]
+
+ ## calculate translation percentage
+ # Weight each translated node by the master's word count for it.
+ master_total_word_count = sum (masterdocument.word_count)
+ translation_word_count = \
+ sum ([masterdocument.word_count[k] * self.translated_nodes[k]
+ for k in range (min (len (masterdocument.word_count),
+ len (self.translated_nodes)))])
+ self.translation_percentage = \
+ 100 * translation_word_count / master_total_word_count
+
+ ## calculate how much the file is outdated
+ (diff_string, error) = \
+ buildlib.check_translated_doc (masterdocument.filename, self.filename, self.contents)
+ if error:
+ sys.stderr.write ('warning: %s: %s' % (self.filename, error))
+ self.uptodate_percentage = None
+ else:
+ # Estimate outdatedness from inserted/deleted characters in the
+ # diff against the committish recorded in the translation.
+ diff = diff_string.splitlines ()
+ insertions = sum ([len (l) - 1 for l in diff
+ if l.startswith ('+')
+ and not l.startswith ('+++')])
+ deletions = sum ([len (l) - 1 for l in diff
+ if l.startswith ('-')
+ and not l.startswith ('---')])
+ outdateness_percentage = 50.0 * (deletions + insertions) / \
+ (masterdocument.size + 0.5 * (deletions - insertions))
+ self.uptodate_percentage = 100 - int (outdateness_percentage)
+ # Clamp implausible values, with a warning.
+ if self.uptodate_percentage > 100:
+ alternative = 50
+ progress ("%s: strange uptodateness percentage %d %%, \
+setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
+ self.uptodate_percentage = alternative
+ elif self.uptodate_percentage < 1:
+ alternative = 1
+ progress ("%s: strange uptodateness percentage %d %%, \
+setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
+ self.uptodate_percentage = alternative
+
+ # Return {format: string} describing translation completeness.
+ # NOTE(review): mutable default 'formats' is shared across calls --
+ # harmless as long as it is never mutated, which it is not here.
+ def completeness (self, formats=['long'], translated=False):
+ if translated:
+ translation = self.translation
+ else:
+ translation = lambda x: x
+
+ if isinstance (formats, str):
+ formats = [formats]
+ p = self.translation_percentage
+ if p == 0:
+ status = 'not translated'
+ elif p == 100:
+ status = 'fully translated'
+ else:
+ status = 'partially translated'
+ # 'p' is picked up by the %(p)d placeholders via locals().
+ return dict ([(f, translation (format_table[status][f]) % locals())
+ for f in formats])
+
+ # Return {format: string} describing up-to-dateness; 'color' format
+ # yields a percentage_color value instead of a format_table entry.
+ def uptodateness (self, formats=['long'], translated=False):
+ if translated:
+ translation = self.translation
+ else:
+ translation = lambda x: x
+
+ if isinstance (formats, str):
+ formats = [formats]
+ p = self.uptodate_percentage
+ if p == None:
+ status = 'N/A'
+ elif p == 100:
+ status = 'up to date'
+ else:
+ status = 'outdated'
+ l = {}
+ for f in formats:
+ if f == 'color' and p != None:
+ l['color'] = percentage_color (p)
+ else:
+ l[f] = translation (format_table[status][f]) % locals ()
+ return l
+
+ # Translated 'pre-GDP'/'post-GDP' label.
+ def gdp_status (self):
+ if self.post_gdp:
+ return self.translation (format_table['post-GDP'])
+ else:
+ return self.translation (format_table['pre-GDP'])
+
+ # One compact <td> cell: translators, checkers, completeness and
+ # up-to-dateness with background colors (for the overview table).
+ def short_html_status (self):
+ s = ' <td>'
+ if self.partially_translated:
+ s += '<br>\n '.join (self.translators) + '<br>\n'
+ if self.checkers:
+ s += ' <small>' + \
+ '<br>\n '.join (self.checkers) + '</small><br>\n'
+
+ c = self.completeness (['color', 'long'])
+ s += ' <span style="background-color: #%(color)s">\
+%(long)s</span><br>\n' % c
+
+ if self.partially_translated:
+ u = self.uptodateness (['vague', 'color'])
+ s += ' <span style="background-color: #%(color)s">\
+%(vague)s</span><br>\n' % u
+
+ s += ' </td>\n'
+ return s
+
+ # Abbreviated status for the plain-text report.
+ def text_status (self):
+ s = self.completeness ('abbr')['abbr'] + ' '
+
+ if self.partially_translated:
+ s += self.uptodateness ('abbr')['abbr'] + ' '
+ return s
+
+ # Detailed per-language table row(s), recursing into included files.
+ # NOTE(review): default 'numbering' instance is shared across calls,
+ # so numbering state persists between invocations -- confirm intended.
+ def html_status (self, numbering=SectionNumber ()):
+ if self.title == 'Untitled':
+ return ''
+
+ if self.level[1] == 0: # if self is a master document
+ s = '''<table align="center" border="2">
+ <tr align="center">
+ <th>%s</th>''' % self.print_title (numbering)
+ s += ''.join ([' <th>%s</th>\n' % self.translation (h)
+ for h in detailed_status_heads])
+ s += ' </tr>\n'
+ s += ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
+ % (self.translation (section_titles_string),
+ sum (self.masterdocument.word_count))
+
+ else:
+ s = ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
+ % (self.print_title (numbering),
+ sum (self.masterdocument.word_count))
+
+ if self.partially_translated:
+ s += ' <td>' + '<br>\n '.join (self.translators) + '</td>\n'
+ s += ' <td>' + '<br>\n '.join (self.checkers) + '</td>\n'
+ else:
+ s += ' <td></td>\n' * 2
+
+ c = self.completeness (['color', 'short'], translated=True)
+ s += ' <td><span style="background-color: #%(color)s">\
+%(short)s</span></td>\n' % {'color': c['color'],
+ 'short': c['short']}
+
+ if self.partially_translated:
+ u = self.uptodateness (['short', 'color'], translated=True)
+ s += ' <td><span style="background-color: #%(color)s">\
+%(short)s</span></td>\n' % {'color': u['color'],
+ 'short': u['short']}
+ else:
+ s += ' <td></td>\n'
+
+ s += ' <td>' + self.gdp_status () + '</td>\n </tr>\n'
+ s += ''.join ([i.translations[self.language].html_status (numbering)
+ for i in self.masterdocument.includes
+ if self.language in i.translations])
+
+ if self.level[1] == 0: # if self is a master document
+ s += '</table>\n<p></p>\n'
+ return s
+
+# A master (English) Texinfo document: knows its word counts, all existing
+# translations (one per language dir), and recursively wraps its includes
+# as MasterTelyDocument instances when at least one translation exists.
+class MasterTelyDocument (TelyDocument):
+ def __init__ (self,
+ filename,
+ parent_translations=dict ([(lang, None)
+ for lang in langdefs.LANGDICT])):
+ TelyDocument.__init__ (self, filename)
+ self.size = len (self.contents)
+ self.word_count = tely_word_count (self.contents)
+ # A translation lives at <lang>/<filename> relative to cwd.
+ translations = dict ([(lang, os.path.join (lang, filename))
+ for lang in langdefs.LANGDICT])
+ self.translations = \
+ dict ([(lang,
+ TranslatedTelyDocument (translations[lang],
+ self, parent_translations.get (lang)))
+ for lang in langdefs.LANGDICT
+ if os.path.exists (translations[lang])])
+ if self.translations:
+ self.includes = [MasterTelyDocument (f, self.translations)
+ for f in self.included_files]
+ else:
+ self.includes = []
+
+ # Refresh this document's (and its includes') word-count lines in the
+ # TRANSLATION instructions text s; returns the updated text.
+ def update_word_counts (self, s):
+ s = update_word_count (s, self.filename, sum (self.word_count))
+ for i in self.includes:
+ s = i.update_word_counts (s)
+ return s
+
+ # Overview table: one column per language, one row per (sub)document.
+ # NOTE(review): shared default 'numbering' instance, as in
+ # TranslatedTelyDocument.html_status -- confirm intended.
+ def html_status (self, numbering=SectionNumber ()):
+ if self.title == 'Untitled' or not self.translations:
+ return ''
+ if self.level[1] == 0: # if self is a master document
+ s = '''<table align="center" border="2">
+ <tr align="center">
+ <th>%s</th>''' % self.print_title (numbering)
+ s += ''.join ([' <th>%s</th>\n' % l for l in self.translations])
+ s += ' </tr>\n'
+ s += ' <tr align="left">\n <td>Section titles<br>(%d)</td>\n' \
+ % sum (self.word_count)
+
+ else: # if self is an included file
+ s = ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
+ % (self.print_title (numbering), sum (self.word_count))
+
+ s += ''.join ([t.short_html_status ()
+ for t in self.translations.values ()])
+ s += ' </tr>\n'
+ s += ''.join ([i.html_status (numbering) for i in self.includes])
+
+ if self.level[1] == 0: # if self is a master document
+ s += '</table>\n<p></p>\n'
+ return s
+
+ # Plain-text variant of the overview, using fixed column widths.
+ def text_status (self, numbering=SectionNumber (), colspec=[48,12]):
+ if self.title == 'Untitled' or not self.translations:
+ return ''
+
+ s = ''
+ if self.level[1] == 0: # if self is a master document
+ s += (self.print_title (numbering) + ' ').ljust (colspec[0])
+ s += ''.join (['%s'.ljust (colspec[1]) % l
+ for l in self.translations])
+ s += '\n'
+ s += ('Section titles (%d)' % \
+ sum (self.word_count)).ljust (colspec[0])
+
+ else:
+ s = '%s (%d) ' \
+ % (self.print_title (numbering), sum (self.word_count))
+ s = s.ljust (colspec[0])
+
+ s += ''.join ([t.text_status ().ljust(colspec[1])
+ for t in self.translations.values ()])
+ s += '\n\n'
+ s += ''.join ([i.text_status (numbering) for i in self.includes])
+
+ if self.level[1] == 0:
+ s += '\n'
+ return s
+
+
+# Matches a whole category section of TRANSLATION: '-<id>-' header through
+# its '<n> total' footer; group 2 holds the per-file count lines.
+update_category_word_counts_re = re.compile (r'(?ms)^-(\d+)-(.*?\n)\d+ *total')
+
+counts_re = re.compile (r'(?m)^(\d+) ')
+
+# re.sub callback: recompute a category's 'total' line as the sum of the
+# leading per-file counts inside the matched section.
+def update_category_word_counts_sub (m):
+ return '-' + m.group (1) + '-' + m.group (2) + \
+ str (sum ([int (c)
+ for c in counts_re.findall (m.group (2))])).ljust (6) + \
+ 'total'
+
+
+# Main flow: read all master .tely documents (max depth 2, i.e. from
+# Documentation/), generate the HTML status pages (main + per language),
+# the plain-text report, and refresh word counts in TRANSLATION.
+progress ("Reading documents...")
+
+tely_files = \
+ buildlib.read_pipe ("find -maxdepth 2 -name '*.tely'")[0].splitlines ()
+tely_files.sort ()
+master_docs = [MasterTelyDocument (os.path.normpath (filename))
+ for filename in tely_files]
+# Only documents with at least one translation are reported.
+master_docs = [doc for doc in master_docs if doc.translations]
+
+main_status_page = open ('translations.template.html.in').read ()
+
+enabled_languages = [l for l in langdefs.LANGDICT
+ if langdefs.LANGDICT[l].enabled
+ and l != 'en']
+lang_status_pages = \
+ dict ([(l, open (os.path.join (l, 'translations.template.html.in')). read ())
+ for l in enabled_languages])
+
+progress ("Generating status pages...")
+
+# 'LANG= date -u' forces the C locale for the English timestamp.
+date_time = buildlib.read_pipe ('LANG= date -u')[0]
+
+main_status_html = last_updated_string % date_time
+main_status_html += '\n'.join ([doc.html_status () for doc in master_docs])
+
+html_re = re.compile ('<html>', re.I)
+end_body_re = re.compile ('</body>', re.I)
+
+html_header = '''<html>
+<!-- This page is automatically generated by translation-status.py from
+translations.template.html.in; DO NOT EDIT !-->'''
+
+# Splice the generated status table into the template: replace <html> with
+# the do-not-edit header and insert the table just before </body>.
+main_status_page = html_re.sub (html_header, main_status_page)
+
+main_status_page = end_body_re.sub (main_status_html + '\n</body>',
+ main_status_page)
+
+open ('translations.html.in', 'w').write (main_status_page)
+
+# Same procedure for every enabled language, with a localized timestamp.
+for l in enabled_languages:
+ date_time = buildlib.read_pipe ('LANG=%s date -u' % l)[0]
+ lang_status_pages[l] = translation[l] (last_updated_string) % date_time + lang_status_pages[l]
+ lang_status_page = html_re.sub (html_header, lang_status_pages[l])
+ html_status = '\n'.join ([doc.translations[l].html_status ()
+ for doc in master_docs
+ if l in doc.translations])
+ lang_status_page = end_body_re.sub (html_status + '\n</body>',
+ lang_status_page)
+ open (os.path.join (l, 'translations.html.in'), 'w').write (lang_status_page)
+
+main_status_txt = '''Documentation translations status
+Generated %s
+NT = not translated
+FT = fully translated
+
+''' % date_time
+
+main_status_txt += '\n'.join ([doc.text_status () for doc in master_docs])
+
+status_txt_file = 'out/translations-status.txt'
+progress ("Writing %s..." % status_txt_file)
+open (status_txt_file, 'w').write (main_status_txt)
+
+# Finally, refresh all word counts recorded in the TRANSLATION file:
+# per-document, per-HTML-file, per-PO-file, then per-category totals.
+translation_instructions_file = 'TRANSLATION'
+progress ("Updating %s..." % translation_instructions_file)
+translation_instructions = open (translation_instructions_file).read ()
+
+for doc in master_docs:
+ translation_instructions = doc.update_word_counts (translation_instructions)
+
+for html_file in re.findall (r'(?m)^\d+ *(\S+?\.html\S*?)(?: |$)',
+ translation_instructions):
+ word_count = sgml_word_count (open (html_file).read ())
+ translation_instructions = update_word_count (translation_instructions,
+ html_file,
+ word_count)
+
+for po_file in re.findall (r'(?m)^\d+ *(\S+?\.po\S*?)(?: |$)',
+ translation_instructions):
+ word_count = po_word_count (open (po_file).read ())
+ translation_instructions = update_word_count (translation_instructions,
+ po_file,
+ word_count)
+
+translation_instructions = \
+ update_category_word_counts_re.sub (update_category_word_counts_sub,
+ translation_instructions)
+
+open (translation_instructions_file, 'w').write (translation_instructions)
--- /dev/null
+#!/usr/bin/env python
+# update-snippets.py
+
+# USAGE: update-snippets.py REFERENCE-DIR TARGET-DIR FILES
+#
+# update ly snippets in TARGET-DIR/FILES with snippets from REFERENCE-DIR/FILES
+#
+# More precisely, each existing FILE in TARGET-DIR is matched to the FILE in
+# REFERENCE-DIR (if the latter does not exist, a warning is given).
+#
+# Shell wildcards expansion is performed on FILES.
+# This script currently supports Texinfo format.
+# Ly snippets preceded with a line containing '@c KEEP LY' in TARGET-DIR/FILES
+# will not be updated.
+# An error occurs if REFERENCE-DIR/FILE and TARGET-DIR/FILE do not have the
+# same snippets count.
+
+import sys
+import os
+import glob
+import re
+
+print "update-snippets.py"
+
+# Split points: @c/@comment lines and @ignore blocks (kept as list items so
+# '@c KEEP LY' markers survive the split); snippet_re matches @lilypond
+# blocks in both brace and @end lilypond forms.
+comment_re = re.compile (r'(?<!@)(@c(?:omment)? .*?\n|^@ignore\n.*?\n@end ignore\n)', re.M | re.S)
+snippet_re = re.compile (r'^(@lilypond(?:file)?(?:\[.*?\])?\s*\{.+?\}|@lilypond(?:\[.*?\])?(?:.|\n)+?@end lilypond)', re.M)
+
+
+# Further split non-comment chunks on @lilypond snippets, dropping empties.
+def snippet_split (l):
+ r = []
+ for s in [s for s in l if s]:
+ if s.startswith ('@c ') or s.startswith ('@ignore\n') or s.startswith ('@comment '):
+ r.append(s)
+ else:
+ r += [t for t in snippet_re.split (s) if t]
+ return r
+
+# Number of @lilypond chunks in a split list.
+def count_snippet (l):
+ k = 0
+ for s in l:
+ if s.startswith ('@lilypond'):
+ k += 1
+ return k
+
+# Index of the next @lilypond chunk in l at or after position k.
+def find_next_snippet (l, k):
+ while not l[k].startswith ('@lilypond'):
+ k += 1
+ return k
+
+exit_code = 0
+
+def update_exit_code (code):
+ global exit_code
+ exit_code = max (code, exit_code)
+
+ref_dir, target_dir = sys.argv [1:3]
+file_patterns = sys.argv[3:]
+
+total_snippet_count = 0
+changed_snippets_count = 0
+
+for pattern in file_patterns:
+ files = glob.glob (os.path.join (target_dir, pattern))
+ for file in files:
+ ref_file = os.path.join (ref_dir, os.path.basename (file))
+ if not os.path.isfile (ref_file):
+ sys.stderr.write ("Warning: %s: no such file.\nReference file for %s not found.\n" % (ref_file, file))
+ continue
+ f = open (file, 'r')
+ target_source = comment_re.split (f.read ())
+ f.close ()
+ # Untranslated skeletons are skipped entirely.
+ if reduce (lambda x, y: x or y, ['-- SKELETON FILE --' in s for s in target_source]):
+ sys.stderr.write ("Skipping skeleton file %s\n" % file)
+ continue
+ # NOTE(review): handle 'g' is never explicitly closed.
+ g = open (ref_file, 'r')
+ ref_source = comment_re.split (g.read ())
+ target_source = snippet_split (target_source)
+ ref_source = snippet_split (ref_source)
+ # NOTE(review): string exceptions are a fatal error in Python >= 2.6.
+ if '' in target_source or '' in ref_source:
+ raise "AAAAARGH: unuseful empty string"
+ snippet_count = count_snippet (target_source)
+ if not snippet_count == count_snippet (ref_source):
+ update_exit_code (1)
+ sys.stderr.write ("Error: %s and %s have different snippet counts.\n\
+Update translation by at least adding a @lilypond block where necessary, then rerun this script.\n" % (ref_file, file))
+ continue
+ total_snippet_count += snippet_count
+ c = 0
+ k = -1
+ # Walk target chunks; pair each target snippet with the next
+ # reference snippet and replace it unless preceded by '@c KEEP LY'.
+ for j in range (len (target_source)):
+ if target_source[j].startswith ('@lilypond'):
+ k = find_next_snippet (ref_source, k+1)
+ if j > 0 and (not target_source[j-1].startswith ('@c KEEP LY')) and target_source[j] != ref_source[k]:
+ target_source[j] = ref_source[k]
+ c += 1
+ changed_snippets_count += 1
+ f = open (file, 'w')
+ f.write (''.join (target_source))
+ sys.stderr.write ('%s: %d/%d snippets updated\n' % (file, c, snippet_count))
+
+sys.stderr.write ('\nTotal: %d snippets, %d updated snippets.\n' % (total_snippet_count, changed_snippets_count))
+sys.exit (exit_code)
--- /dev/null
+# Stepmake makefile for this scripts directory: pulls in the 'script',
+# 'install' and 'po' template rules from $(depth)/make/stepmake.make.
+depth = ../..
+
+STEPMAKE_TEMPLATES=script install po
+
+include $(depth)/make/stepmake.make
+
+# Should we install these? This should be handled by sysadmin or
+# packager but if she forgets...
+#INSTALLATION_OUT_SUFFIXES=1
+#INSTALLATION_OUT_DIR1=$(local_lilypond_datadir)/scripts
+#INSTALLATION_OUT_FILES1=$(outdir)/lilypond-login $(outdir)/lilypond-profile
+
+# INSTALLATION_FILES is presumably provided by the stepmake templates
+# included above -- confirm in make/stepmake.make.
+all: $(INSTALLATION_FILES)
+
--- /dev/null
+#!@PYTHON@
+# bib2html: run bibtex with an html-* style over the given .bib files and
+# write the resulting .bbl contents to the output file (default bib.html).
+import os
+import sys
+import getopt
+import tempfile
+
+# usage:
+def usage ():
+ # FIX: the original printed the literal '%s'; substitute the program name.
+ print 'usage: %s [-s style] [-o <outfile>] BIBFILES...' % sys.argv[0]
+
+(options, files) = getopt.getopt (sys.argv[1:], 's:o:', [])
+
+output = 'bib.html'
+style = 'long'
+
+for (o,a) in options:
+ if o == '-h' or o == '--help':
+ usage ()
+ sys.exit (0)
+ elif o == '-s' or o == '--style':
+ style = a
+ elif o == '-o' or o == '--output':
+ output = a
+ else:
+ raise Exception ('unknown option: %s' % o)
+
+
+# Unknown styles only produce a warning; bibtex will fail later if the
+# corresponding html-<style>.bst really does not exist.
+if style not in ['alpha','index','long','longp','long-pario','short','short-pario','split']:
+ sys.stderr.write ("Unknown style \`%s'\n" % style)
+
+# NOTE(review): mktemp is insecure (race between name choice and creation);
+# kept because bibtex needs a plain name stem it can append .aux/.bbl to.
+tempfile = tempfile.mktemp ('bib2html')
+
+if not files:
+ usage ()
+ sys.exit (2)
+
+
+# Drop EXT from f if present (bibtex wants bare database names).
+def strip_extension (f, ext):
+ (p, e) = os.path.splitext (f)
+ if e == ext:
+ e = ''
+ return p + e
+
+nf = []
+for f in files:
+ nf.append (strip_extension (f, '.bib'))
+
+files = ','.join (nf)
+
+# Minimal .aux file citing everything, using the html-<style> .bst.
+open (tempfile + '.aux', 'w').write (r'''
+\relax
+\citation{*}
+\bibstyle{html-%(style)s}
+\bibdata{%(files)s}''' % vars ())
+
+cmd = "bibtex %s" % tempfile
+
+sys.stdout.write ("Invoking `%s'\n" % cmd)
+stat = os.system (cmd)
+if stat <> 0:
+ sys.exit(1)
+
+
+#TODO: do tex -> html on output
+
+bbl = open (tempfile + '.bbl').read ()
+
+open (output, 'w').write (bbl)
+
+
+# Remove bibtex's scratch files for our temp name stem.
+def cleanup (tempfile):
+ for a in ['aux','bbl', 'blg']:
+ os.unlink (tempfile + '.' + a)
+
+cleanup (tempfile)
+
--- /dev/null
+#!@PYTHON@
+# Dump all events of a MIDI file to stdout, one per line, skipping the
+# first track. Uses the project's 'midi' parser module; the parse result
+# is presumably (header, track-event-lists) -- confirm against that module.
+
+import sys
+import midi
+
+(h,tracks) = midi.parse (open (sys.argv[1]).read ())
+
+# Skip the first track (h holds the parsed header; unused here).
+tracks = tracks[1:]
+
+for t in tracks:
+ for e in t:
+ print e
--- /dev/null
+#!@PYTHON@
+# -*- coding: utf-8 -*-
+# extract_texi_filenames.py
+
+# USAGE: extract_texi_filenames.py [-o OUTDIR] FILES
+#
+# -o OUTDIR specifies that output files should rather be written in OUTDIR
+#
+# Description:
+# This script parses the .texi file given and creates a file with the
+# nodename <=> filename/anchor map.
+# The idea behind: Unnumbered subsections go into the same file as the
+# previous numbered section, @translationof gives the original node name,
+# which is then used for the filename/anchor.
+#
+# If this script is run on a file texifile.texi, it produces a file
+# texifile[.LANG].xref-map with tab-separated entries of the form
+# NODE\tFILENAME\tANCHOR
+# LANG is the document language in case it's not 'en'
+# Note: The filename does not have any extension appended!
+# This file can then be used by our texi2html init script to determine
+# the correct file name and anchor for external refs
+
+import sys
+import re
+import os
+import getopt
+
+# -o OUTDIR: write .xref-map files to OUTDIR instead of '.'.
+optlist, args = getopt.getopt (sys.argv[1:],'o:')
+files = args
+
+outdir = '.'
+for x in optlist:
+ if x[0] == '-o':
+ outdir = x[1]
+
+# Ensure outdir exists; a plain file in the way is removed first.
+if not os.path.isdir (outdir):
+ if os.path.exists (outdir):
+ os.unlink (outdir)
+ os.makedirs (outdir)
+
+# @include lines, excluding generated ../lily-* includes.
+include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M)
+whitespaces = re.compile (r'\s+')
+# @node, sectioning commands and @translationof, with their argument.
+section_translation_re = re.compile ('^@(node|(?:unnumbered|appendix)\
+(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|\
+(?:major|chap|(?:sub){0,2})heading|translationof) (.*?)\\s*$', re.MULTILINE)
+
+# include_re.sub callback: replace an @include by the section list of the
+# included file (resolved relative to the including file); missing files
++# are reported and yield an empty replacement.
+def expand_includes (m, filename):
+ filepath = os.path.join (os.path.dirname (filename), m.group(1)) + '.texi'
+ if os.path.exists (filepath):
+ return extract_sections (filepath)[1]
+ else:
+ print "Unable to locate include file " + filepath
+ return ''
+
+lang_re = re.compile (r'^@documentlanguage (.+)', re.M)
+
+# Read FILENAME, expand its @include's recursively, and return
+# (lang_suffix, result) where lang_suffix is '.<lang>' for non-English
+# documents ('' otherwise) and result is one '@<command> <arg>' line per
+# node/sectioning/translationof command found.
+def extract_sections (filename):
+ result = ''
+ f = open (filename, 'r')
+ page = f.read ()
+ f.close()
+ # Search document language
+ m = lang_re.search (page)
+ if m and m.group (1) != 'en':
+ lang_suffix = '.' + m.group (1)
+ else:
+ lang_suffix = ''
+ # Replace all includes by their list of sections and extract all sections
+ page = include_re.sub (lambda m: expand_includes (m, filename), page)
+ sections = section_translation_re.findall (page)
+ for sec in sections:
+ result += "@" + sec[0] + " " + sec[1] + "\n"
+ return (lang_suffix, result)
+
+# Convert a given node name to its proper file name (normalization as explained
+# in the texinfo manual:
+# http://www.gnu.org/software/texinfo/manual/texinfo/html_node/HTML-Xref-Node-Name-Expansion.html
+def texinfo_file_name(title):
+ # exception: The top node is always mapped to index.html
+ if title == "Top":
+ return "index"
+ # File name normalization by texinfo (described in the texinfo manual):
+ # 1/2: letters and numbers are left unchanged
+ # 3/4: multiple, leading and trailing whitespace is removed
+ title = title.strip ();
+ title = whitespaces.sub (' ', title)
+ # 5: all remaining spaces are converted to '-'
+ # 6: all other 7- or 8-bit chars are replaced by _xxxx (xxxx=ascii character code)
+ result = ''
+ for index in range(len(title)):
+ char = title[index]
+ if char == ' ': # space -> '-'
+ result += '-'
+ elif ( ('0' <= char and char <= '9' ) or
+ ('A' <= char and char <= 'Z' ) or
+ ('a' <= char and char <= 'z' ) ): # number or letter
+ result += char
+ else:
+ ccode = ord(char)
+ if ccode <= 0xFFFF:
+ result += "_%04x" % ccode
+ else:
+ result += "__%06x" % ccode
+ # 7: if name begins with number, prepend 't_g' (so it starts with a letter)
+ # FIX: range (ord('0'), ord('9')) excluded '9' (range upper bound is
+ # exclusive), so names starting with '9' were not prefixed; use + 1.
+ if (result != '') and (ord(result[0]) in range (ord('0'), ord('9') + 1)):
+ result = 't_g' + result
+ return result
+
+texinfo_re = re.compile (r'@.*{(.*)}')
+def remove_texinfo (title):
+ return texinfo_re.sub (r'\1', title)
+
+def create_texinfo_anchor (title):
+ return texinfo_file_name (remove_texinfo (title))
+
+unnumbered_re = re.compile (r'unnumbered.*')
+def process_sections (filename, lang_suffix, page):
+ sections = section_translation_re.findall (page)
+ basename = os.path.splitext (os.path.basename (filename))[0]
+ p = os.path.join (outdir, basename) + lang_suffix + '.xref-map'
+ f = open (p, 'w')
+
+ this_title = ''
+ this_filename = 'index'
+ this_anchor = ''
+ this_unnumbered = False
+ had_section = False
+ for sec in sections:
+ if sec[0] == "node":
+ # Write out the cached values to the file and start a new section:
+ if this_title != '' and this_title != 'Top':
+ f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
+ had_section = False
+ this_title = remove_texinfo (sec[1])
+ this_anchor = create_texinfo_anchor (sec[1])
+ elif sec[0] == "translationof":
+ anchor = create_texinfo_anchor (sec[1])
+ # If @translationof is used, it gives the original node name, which
+ # we use for the anchor and the file name (if it is a numbered node)
+ this_anchor = anchor
+ if not this_unnumbered:
+ this_filename = anchor
+ else:
+ # Some pages might not use a node for every section, so treat this
+            # case here, too: If we already had a section and encounter another
+ # one before the next @node, we write out the old one and start
+ # with the new values
+ if had_section and this_title != '':
+ f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
+ this_title = remove_texinfo (sec[1])
+ this_anchor = create_texinfo_anchor (sec[1])
+ had_section = True
+
+ # unnumbered nodes use the previously used file name, only numbered
+ # nodes get their own filename! However, top-level @unnumbered
+ # still get their own file.
+ this_unnumbered = unnumbered_re.match (sec[0])
+ if not this_unnumbered or sec[0] == "unnumbered":
+ this_filename = this_anchor
+
+ if this_title != '' and this_title != 'Top':
+ f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
+ f.close ()
+
+
+for filename in files:
+ print "extract_texi_filenames.py: Processing %s" % filename
+ (lang_suffix, sections) = extract_sections (filename)
+ process_sections (filename, lang_suffix, sections)
--- /dev/null
+#!@PYTHON@
+import sys
+import getopt
+import re
+import os
+
+(options, files) = \
+ getopt.getopt (sys.argv[1:],
+ '',
+ ['dir='])
+
+
+outdir = ''
+for opt in options:
+ o = opt[0]
+ a = opt[1]
+ if o == '--dir':
+ outdir = a
+ else:
+ print o
+ raise getopt.error
+
+# Ugh
+for design_size in [11,13,14,16,18,20,23,26]:
+ name = 'Emmentaler'
+ filename = name.lower ()
+ script = '''#!@FONTFORGE@
+
+New();
+
+# Separate Feta versioning?
+# * using 20 as Weight works for gnome-font-select widget: gfs
+
+notice = "";
+notice += "This font is distributed under the GNU General Public License. ";
+notice += "As a special exception, if you create a document which uses ";
+notice += "this font, and embed this font or unaltered portions of this ";
+notice += "font into the document, this font does not by itself cause the ";
+notice += "resulting document to be covered by the GNU General Public License.";;
+
+SetFontNames("%(name)s-%(design_size)d", "%(name)s", "%(name)s %(design_size)d", "%(design_size)d", notice, "@TOPLEVEL_VERSION@");
+
+MergeFonts("feta%(design_size)d.pfb");
+MergeFonts("parmesan%(design_size)d.pfb");
+
+# load nummer/din after setting PUA.
+i = 0;
+while (i < CharCnt())
+ Select(i);
+# crashes fontforge, use PUA for now -- jcn
+# SetUnicodeValue(i + 0xF0000, 0);
+/*
+PRIVATE AREA
+ In the BMP, the range 0xe000 to 0xf8ff will never be assigned to any
+ characters by the standard and is reserved for private usage. For the
+ Linux community, this private area has been subdivided further into the
+ range 0xe000 to 0xefff which can be used individually by any end-user
+ and the Linux zone in the range 0xf000 to 0xf8ff where extensions are
+ coordinated among all Linux users. The registry of the characters
+ assigned to the Linux zone is currently maintained by H. Peter Anvin
+ <Peter.Anvin@linux.org>.
+*/
+ SetUnicodeValue(i + 0xE000, 0);
+ ++i;
+endloop
+
+
+MergeFonts("feta-alphabet%(design_size)d.pfb");
+MergeKern("feta-alphabet%(design_size)d.tfm");
+
+LoadTableFromFile("LILF", "%(filename)s-%(design_size)d.subfonts");
+LoadTableFromFile("LILC", "feta%(design_size)d.otf-table");
+LoadTableFromFile("LILY", "feta%(design_size)d.otf-gtable");
+
+Generate("%(filename)s-%(design_size)d.otf");
+Generate("%(filename)s-%(design_size)d.svg");
+''' % vars()
+
+ basename = '%s-%d' % (filename, design_size)
+ path = os.path.join (outdir, basename + '.pe')
+ open (path, 'w').write (script)
+
+ subfonts = ['feta%(design_size)d',
+ 'parmesan%(design_size)d',
+ 'feta-alphabet%(design_size)d']
+
+ ns = []
+ for s in subfonts:
+ ns.append ('%s' % (s % vars()))
+
+ subfonts_str = ' '.join (ns)
+
+ open (os.path.join (outdir, '%(filename)s-%(design_size)d.subfonts' % vars()), 'w').write (subfonts_str)
+
+ path = os.path.join (outdir, '%s-%d.dep' % (filename, design_size))
+
+ deps = r'''%(filename)s-%(design_size)d.otf: $(outdir)/feta%(design_size)d.pfa \
+ $(outdir)/parmesan%(design_size)d.pfa \
+ $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-table \
+ $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-gtable
+''' % vars()
+ open (path, 'w').write (deps)
+
+ open (os.path.join (outdir, basename + '.fontname'), 'w').write ("%s-%d" % (name, design_size))
--- /dev/null
+#!@PYTHON@
+import os
+import sys
+import tempfile
+
+base = os.path.splitext (os.path.split (sys.argv[1])[1])[0]
+input = os.path.abspath (sys.argv[1])
+output = os.path.abspath (sys.argv[2])
+program_name= os.path.split (sys.argv[0])[1]
+
+dir = tempfile.mktemp (program_name)
+os.mkdir (dir, 0777)
+os.chdir(dir)
+
+def system (c):
+ print c
+ if os.system (c):
+ raise 'barf'
+
+outputs = []
+for sz in [48,32,16] :
+
+ for depth in [24,8]:
+ out = '%(base)s-%(sz)d-%(depth)d.png' % locals()
+ system ('convert -depth %(depth)d -sample %(sz)d %(input)s %(out)s' %
+ locals ())
+ outputs.append (out)
+
+system('icotool --output %s --create %s' % (output, ' '.join (outputs)))
+system('rm -rf %(dir)s' % locals())
+
--- /dev/null
+#!@BASH@
+# note: dash does not work
+
+pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*2007' '\1 2007--2008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change')
+pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*([^-]*--)(200[0-7])' '\1 \2\062008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change')
--- /dev/null
+#!@PERL@ -w
+
+# Generate a short man page from --help and --version output.
+# Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software
+# Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+# Written by Brendan O'Dea <bod@debian.org>
+# Available from ftp://ftp.gnu.org/gnu/help2man/
+
+use 5.005;
+use strict;
+use Getopt::Long;
+use Text::Tabs qw(expand);
+use POSIX qw(strftime setlocale LC_TIME);
+
+my $this_program = 'help2man';
+my $this_version = '1.28';
+my $version_info = <<EOT;
+GNU $this_program $this_version
+
+Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+Written by Brendan O'Dea <bod\@debian.org>
+EOT
+
+my $help_info = <<EOT;
+`$this_program' generates a man page out of `--help' and `--version' output.
+
+Usage: $this_program [OPTIONS]... EXECUTABLE
+
+ -n, --name=STRING description for the NAME paragraph
+ -s, --section=SECTION section number for manual page (1, 6, 8)
+ -m, --manual=TEXT name of manual (User Commands, ...)
+ -S, --source=TEXT source of program (FSF, Debian, ...)
+ -i, --include=FILE include material from `FILE'
+ -I, --opt-include=FILE include material from `FILE' if it exists
+ -o, --output=FILE send output to `FILE'
+ -p, --info-page=TEXT name of Texinfo manual
+ -N, --no-info suppress pointer to Texinfo manual
+ --help print this help, then exit
+ --version print version number, then exit
+
+EXECUTABLE should accept `--help' and `--version' options although
+alternatives may be specified using:
+
+ -h, --help-option=STRING help option string
+ -v, --version-option=STRING version option string
+
+Report bugs to <bug-help2man\@gnu.org>.
+EOT
+
+my $section = 1;
+my $manual = '';
+my $source = '';
+my $help_option = '--help';
+my $version_option = '--version';
+my ($opt_name, @opt_include, $opt_output, $opt_info, $opt_no_info);
+
+my %opt_def = (
+ 'n|name=s' => \$opt_name,
+ 's|section=s' => \$section,
+ 'm|manual=s' => \$manual,
+ 'S|source=s' => \$source,
+ 'i|include=s' => sub { push @opt_include, [ pop, 1 ] },
+ 'I|opt-include=s' => sub { push @opt_include, [ pop, 0 ] },
+ 'o|output=s' => \$opt_output,
+ 'p|info-page=s' => \$opt_info,
+ 'N|no-info' => \$opt_no_info,
+ 'h|help-option=s' => \$help_option,
+ 'v|version-option=s' => \$version_option,
+);
+
+# Parse options.
+Getopt::Long::config('bundling');
+GetOptions (%opt_def,
+ help => sub { print $help_info; exit },
+ version => sub { print $version_info; exit },
+) or die $help_info;
+
+die $help_info unless @ARGV == 1;
+
+my %include = ();
+my %append = ();
+my @include = (); # retain order given in include file
+
+# Process include file (if given). Format is:
+#
+# [section name]
+# verbatim text
+#
+# or
+#
+# /pattern/
+# verbatim text
+#
+
+while (@opt_include)
+{
+ my ($inc, $required) = @{shift @opt_include};
+
+ next unless -f $inc or $required;
+ die "$this_program: can't open `$inc' ($!)\n"
+ unless open INC, $inc;
+
+ my $key;
+ my $hash = \%include;
+
+ while (<INC>)
+ {
+ # [section]
+ if (/^\[([^]]+)\]/)
+ {
+ $key = uc $1;
+ $key =~ s/^\s+//;
+ $key =~ s/\s+$//;
+ $hash = \%include;
+ push @include, $key unless $include{$key};
+ next;
+ }
+
+ # /pattern/
+ if (m!^/(.*)/([ims]*)!)
+ {
+ my $pat = $2 ? "(?$2)$1" : $1;
+
+ # Check pattern.
+ eval { $key = qr($pat) };
+ if ($@)
+ {
+ $@ =~ s/ at .*? line \d.*//;
+ die "$inc:$.:$@";
+ }
+
+ $hash = \%append;
+ next;
+ }
+
+ # Check for options before the first section--anything else is
+ # silently ignored, allowing the first for comments and
+ # revision info.
+ unless ($key)
+ {
+ # handle options
+ if (/^-/)
+ {
+ local @ARGV = split;
+ GetOptions %opt_def;
+ }
+
+ next;
+ }
+
+ $hash->{$key} ||= '';
+ $hash->{$key} .= $_;
+ }
+
+ close INC;
+
+ die "$this_program: no valid information found in `$inc'\n"
+ unless $key;
+}
+
+# Compress trailing blank lines.
+for my $hash (\(%include, %append))
+{
+ for (keys %$hash) { $hash->{$_} =~ s/\n+$/\n/ }
+}
+
+# Turn off localisation of executable's output.
+@ENV{qw(LANGUAGE LANG LC_ALL)} = ('C') x 3;
+
+# Turn off localisation of date (for strftime).
+setlocale LC_TIME, 'C';
+
+# Grab help and version info from executable.
+my ($help_text, $version_text) = map {
+ join '', map { s/ +$//; expand $_ } `$ARGV[0] $_ 2>/dev/null`
+ or die "$this_program: can't get `$_' info from $ARGV[0]\n"
+} $help_option, $version_option;
+
+my $date = strftime "%B %Y", localtime;
+(my $program = $ARGV[0]) =~ s!.*/!!;
+my $package = $program;
+my $version;
+
+if ($opt_output)
+{
+ unlink $opt_output
+ or die "$this_program: can't unlink $opt_output ($!)\n"
+ if -e $opt_output;
+
+ open STDOUT, ">$opt_output"
+ or die "$this_program: can't create $opt_output ($!)\n";
+}
+
+# The first line of the --version information is assumed to be in one
+# of the following formats:
+#
+# <version>
+# <program> <version>
+# {GNU,Free} <program> <version>
+# <program> ({GNU,Free} <package>) <version>
+# <program> - {GNU,Free} <package> <version>
+#
+# and separated from any copyright/author details by a blank line.
+
+($_, $version_text) = split /\n+/, $version_text, 2;
+
+if (/^(\S+) +\(((?:GNU|Free) +[^)]+)\) +(.*)/ or
+ /^(\S+) +- *((?:GNU|Free) +\S+) +(.*)/)
+{
+ $program = $1;
+ $package = $2;
+ $version = $3;
+}
+elsif (/^((?:GNU|Free) +)?(\S+) +(.*)/)
+{
+ $program = $2;
+ $package = $1 ? "$1$2" : $2;
+ $version = $3;
+}
+else
+{
+ $version = $_;
+}
+
+$program =~ s!.*/!!;
+
+# No info for `info' itself.
+$opt_no_info = 1 if $program eq 'info';
+
+# --name overrides --include contents.
+$include{NAME} = "$program \\- $opt_name\n" if $opt_name;
+
+# Default (useless) NAME paragraph.
+$include{NAME} ||= "$program \\- manual page for $program $version\n";
+
+# Man pages traditionally have the page title in caps.
+my $PROGRAM = uc $program;
+
+# Set default page head/footers
+$source ||= "$program $version";
+unless ($manual)
+{
+ for ($section)
+ {
+ if (/^(1[Mm]|8)/) { $manual = 'System Administration Utilities' }
+ elsif (/^6/) { $manual = 'Games' }
+ else { $manual = 'User Commands' }
+ }
+}
+
+# Extract usage clause(s) [if any] for SYNOPSIS.
+if ($help_text =~ s/^Usage:( +(\S+))(.*)((?:\n(?: {6}\1| *or: +\S).*)*)//m)
+{
+ my @syn = $2 . $3;
+
+ if ($_ = $4)
+ {
+ s/^\n//;
+ for (split /\n/) { s/^ *(or: +)?//; push @syn, $_ }
+ }
+
+ my $synopsis = '';
+ for (@syn)
+ {
+ $synopsis .= ".br\n" if $synopsis;
+ s!^\S*/!!;
+ s/^(\S+) *//;
+ $synopsis .= ".B $1\n";
+ s/\s+$//;
+ s/(([][]|\.\.+)+)/\\fR$1\\fI/g;
+ s/^/\\fI/ unless s/^\\fR//;
+ $_ .= '\fR';
+ s/(\\fI)( *)/$2$1/g;
+ s/\\fI\\fR//g;
+ s/^\\fR//;
+ s/\\fI$//;
+ s/^\./\\&./;
+
+ $synopsis .= "$_\n";
+ }
+
+ $include{SYNOPSIS} ||= $synopsis;
+}
+
+# Process text, initial section is DESCRIPTION.
+my $sect = 'DESCRIPTION';
+$_ = "$help_text\n\n$version_text";
+
+# Normalise paragraph breaks.
+s/^\n+//;
+s/\n*$/\n/;
+s/\n\n+/\n\n/g;
+
+# Temporarily exchange leading dots, apostrophes and backslashes for
+# tokens.
+s/^\./\x80/mg;
+s/^'/\x81/mg;
+s/\\/\x82/g;
+
+# Start a new paragraph (if required) for these.
+s/([^\n])\n(Report +bugs|Email +bug +reports +to|Written +by)/$1\n\n$2/g;
+
+sub convert_option;
+
+while (length)
+{
+ # Convert some standard paragraph names.
+ if (s/^(Options|Examples): *\n//)
+ {
+ $sect = uc $1;
+ next;
+ }
+
+ # Copyright section
+ if (/^Copyright +[(\xa9]/)
+ {
+ $sect = 'COPYRIGHT';
+ $include{$sect} ||= '';
+ $include{$sect} .= ".PP\n" if $include{$sect};
+
+ my $copy;
+ ($copy, $_) = split /\n\n/, $_, 2;
+
+ for ($copy)
+ {
+ # Add back newline
+ s/\n*$/\n/;
+
+	# Convert iso8859-1 copyright symbol or (c) to nroff
+ # character.
+ s/^Copyright +(?:\xa9|\([Cc]\))/Copyright \\(co/mg;
+
+ # Insert line breaks before additional copyright messages
+ # and the disclaimer.
+ s/(.)\n(Copyright |This +is +free +software)/$1\n.br\n$2/g;
+
+ # Join hyphenated lines.
+ s/([A-Za-z])-\n */$1/g;
+ }
+
+ $include{$sect} .= $copy;
+ $_ ||= '';
+ next;
+ }
+
+ # Catch bug report text.
+ if (/^(Report +bugs|Email +bug +reports +to) /)
+ {
+ $sect = 'REPORTING BUGS';
+ }
+
+ # Author section.
+ elsif (/^Written +by/)
+ {
+ $sect = 'AUTHOR';
+ }
+
+ # Examples, indicated by an indented leading $, % or > are
+ # rendered in a constant width font.
+ if (/^( +)([\$\%>] )\S/)
+ {
+ my $indent = $1;
+ my $prefix = $2;
+ my $break = '.IP';
+ $include{$sect} ||= '';
+ while (s/^$indent\Q$prefix\E(\S.*)\n*//)
+ {
+ $include{$sect} .= "$break\n\\f(CW$prefix$1\\fR\n";
+ $break = '.br';
+ }
+
+ next;
+ }
+
+ my $matched = '';
+ $include{$sect} ||= '';
+
+ # Sub-sections have a trailing colon and the second line indented.
+ if (s/^(\S.*:) *\n / /)
+ {
+ $matched .= $& if %append;
+ $include{$sect} .= qq(.SS "$1"\n);
+ }
+
+ my $indent = 0;
+ my $content = '';
+
+ # Option with description.
+ if (s/^( {1,10}([+-]\S.*?))(?:( +(?!-))|\n( {20,}))(\S.*)\n//)
+ {
+ $matched .= $& if %append;
+ $indent = length ($4 || "$1$3");
+ $content = ".TP\n\x83$2\n\x83$5\n";
+ unless ($4)
+ {
+ # Indent may be different on second line.
+ $indent = length $& if /^ {20,}/;
+ }
+ }
+
+ # Option without description.
+ elsif (s/^ {1,10}([+-]\S.*)\n//)
+ {
+ $matched .= $& if %append;
+ $content = ".HP\n\x83$1\n";
+ $indent = 80; # not continued
+ }
+
+ # Indented paragraph with tag.
+ elsif (s/^( +(\S.*?) +)(\S.*)\n//)
+ {
+ $matched .= $& if %append;
+ $indent = length $1;
+ $content = ".TP\n\x83$2\n\x83$3\n";
+ }
+
+ # Indented paragraph.
+ elsif (s/^( +)(\S.*)\n//)
+ {
+ $matched .= $& if %append;
+ $indent = length $1;
+ $content = ".IP\n\x83$2\n";
+ }
+
+ # Left justified paragraph.
+ else
+ {
+ s/(.*)\n//;
+ $matched .= $& if %append;
+ $content = ".PP\n" if $include{$sect};
+ $content .= "$1\n";
+ }
+
+ # Append continuations.
+ while (s/^ {$indent}(\S.*)\n//)
+ {
+ $matched .= $& if %append;
+ $content .= "\x83$1\n"
+ }
+
+ # Move to next paragraph.
+ s/^\n+//;
+
+ for ($content)
+ {
+ # Leading dot and apostrophe protection.
+ s/\x83\./\x80/g;
+ s/\x83'/\x81/g;
+ s/\x83//g;
+
+ # Convert options.
+ s/(^| )(-[][\w=-]+)/$1 . convert_option $2/mge;
+ }
+
+ # Check if matched paragraph contains /pat/.
+ if (%append)
+ {
+ for my $pat (keys %append)
+ {
+ if ($matched =~ $pat)
+ {
+ $content .= ".PP\n" unless $append{$pat} =~ /^\./;
+ $content .= $append{$pat};
+ }
+ }
+ }
+
+ $include{$sect} .= $content;
+}
+
+# Refer to the real documentation.
+unless ($opt_no_info)
+{
+ my $info_page = $opt_info || $program;
+
+ $sect = 'SEE ALSO';
+ $include{$sect} ||= '';
+ $include{$sect} .= ".PP\n" if $include{$sect};
+ $include{$sect} .= <<EOT;
+The full documentation for
+.B $program
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B $program
+programs are properly installed at your site, the command
+.IP
+.B info $info_page
+.PP
+should give you access to the complete manual.
+EOT
+}
+
+# Output header.
+print <<EOT;
+.\\" DO NOT MODIFY THIS FILE! It was generated by $this_program $this_version.
+.TH $PROGRAM "$section" "$date" "$source" "$manual"
+EOT
+
+# Section ordering.
+my @pre = qw(NAME SYNOPSIS DESCRIPTION OPTIONS EXAMPLES);
+my @post = ('AUTHOR', 'REPORTING BUGS', 'COPYRIGHT', 'SEE ALSO');
+my $filter = join '|', @pre, @post;
+
+# Output content.
+for (@pre, (grep ! /^($filter)$/o, @include), @post)
+{
+ if ($include{$_})
+ {
+ my $quote = /\W/ ? '"' : '';
+ print ".SH $quote$_$quote\n";
+
+ for ($include{$_})
+ {
+ # Replace leading dot, apostrophe and backslash tokens.
+ s/\x80/\\&./g;
+ s/\x81/\\&'/g;
+ s/\x82/\\e/g;
+ print;
+ }
+ }
+}
+
+exit;
+
+# Convert option dashes to \- to stop nroff from hyphenating 'em, and
+# embolden. Option arguments get italicised.
+sub convert_option
+{
+ local $_ = '\fB' . shift;
+
+ s/-/\\-/g;
+ unless (s/\[=(.*)\]$/\\fR[=\\fI$1\\fR]/)
+ {
+ s/=(.)/\\fR=\\fI$1/;
+ s/ (.)/ \\fI$1/;
+ $_ .= '\fR';
+ }
+
+ $_;
+}
--- /dev/null
+#!@PYTHON@
+# html-gettext.py
+
+# USAGE: html-gettext.py [-o OUTDIR] LANG FILES
+#
+# -o OUTDIR specifies that output files should be written in OUTDIR
+# rather than be overwritten
+#
+
+import sys
+import re
+import os
+import getopt
+
+import langdefs
+
+optlist, args = getopt.getopt(sys.argv[1:],'o:')
+lang = args[0]
+files = args [1:]
+
+outdir = '.'
+for x in optlist:
+ if x[0] == '-o':
+ outdir = x[1]
+
+double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
+my_gettext = langdefs.translation[lang]
+
+html_codes = ((' -- ', ' – '),
+ (' --- ', ' — '),
+ ("'", '’'))
+texi_html_conversion = {
+ 'command': {
+ 'html2texi':
+ (re.compile (r'(?:<samp><span class="command">|<code>)(.*?)(?:</span></samp>|</code>)'),
+ r'@command{\1}'),
+ 'texi2html':
+ (re.compile (r'@command{(.*?)}'),
+ r'<code>\1</code>'),
+ },
+ 'code': {
+ 'html2texi':
+ (re.compile (r'<code>(.*?)</code>'),
+ r'@code{\1}'),
+ 'texi2html':
+ (re.compile (r'@code{(.*?)}'),
+ r'<code>\1</code>'),
+ },
+ }
+
+whitespaces = re.compile (r'\s+')
+
+
+def _ (s):
+ if not s:
+ return ''
+ str = whitespaces.sub (' ', s)
+ for c in html_codes:
+ str = str.replace (c[1], c[0])
+ for command in texi_html_conversion:
+ d = texi_html_conversion[command]
+ str = d['html2texi'][0].sub (d['html2texi'][1], str)
+ str = my_gettext (str)
+ str = d['texi2html'][0].sub (d['texi2html'][1], str)
+ for c in html_codes:
+ str = str.replace (c[0], c[1])
+ return str
+
+link_re = re.compile (r'<link rel="(up|prev|next)" (.*?) title="([^"]*?)">')
+
+def link_gettext (m):
+ return '<link rel="' + m.group (1) + '" ' + m.group (2) \
+ + ' title="' + _ (m.group (3)) + '">'
+
+makeinfo_title_re = re.compile (r'<title>([^<]*?) - ([^<]*?)</title>')
+
+def makeinfo_title_gettext (m):
+ return '<title>' + _ (m.group (1)) + ' - ' + m.group (2) + '</title>'
+
+texi2html_title_re = re.compile (r'<title>(.+): ([A-Z\d.]+ |)(.+?)</title>')
+
+def texi2html_title_gettext (m):
+ return '<title>' + _ (m.group (1)) + double_punct_char_separator + ': ' \
+ + m.group (2) + _ (m.group (3)) + '</title>'
+
+a_href_re = re.compile ('(?s)<a (?P<attributes>[^>]*?href="[\\w.#-_]+"[^>]*?>)(?P<code><code>)?\
+(?P<appendix>Appendix )?(?P<leading>[A-Z0-9.]+ | (?:<){1,2} | [^>:]+?: | |)\
+(?P<name>(?:<samp><span class="command">|</?code>|</span>|[^>])+?)(?P<end_code>(?(code)</code>|))\
+(?P<trailing> (?:>){1,2} | |)</a>:?')
+
+def a_href_gettext (m):
+ s = ''
+ if m.group(0)[-1] == ':':
+ s = double_punct_char_separator + ':'
+ t = ''
+ if m.group ('appendix'):
+ t = _ (m.group ('appendix'))
+ return '<a ' + m.group ('attributes') + (m.group ('code') or '') + \
+ t + m.group ('leading') + _ (m.group ('name')) + \
+ m.group ('end_code') + m.group ('trailing') + '</a>' + s
+
+h_re = re.compile (r'<h(\d)( class="\w+"|)>\s*(Appendix |)([A-Z\d.]+ |)(.+?)\s*</h\1>')
+
+def h_gettext (m):
+ if m.group (3):
+ s = _ (m.group (3))
+ else:
+ s= ''
+ return '<h' + m.group (1) + m.group (2) + '>' + s +\
+ m.group (4) + _ (m.group (5)) + '</h' + m.group (1) + '>'
+
+for filename in files:
+ f = open (filename, 'r')
+ page = f.read ()
+ f.close ()
+ page = link_re.sub (link_gettext, page)
+ page = makeinfo_title_re.sub (makeinfo_title_gettext, page)
+ page = texi2html_title_re.sub (texi2html_title_gettext, page)
+ page = a_href_re.sub (a_href_gettext, page)
+ page = h_re.sub (h_gettext, page)
+ for w in ('Next:', 'Previous:', 'Up:'):
+ page = page.replace (w, _ (w))
+ page = langdefs.LANGDICT[lang].html_filter (page)
+ f = open (os.path.join (outdir, filename), 'w')
+ f.write (page)
+ f.close ()
--- /dev/null
+#!@BASH@
+
+name=install-info-html
+version=1.0
+
+all=
+index_dir=.
+
+#
+# debugging
+#
+debug_echo=:
+
+
+#
+# print usage
+#
+help ()
+{
+ cat << EOF
+$name $version
+Install HTML info document.
+
+Usage: $name [OPTIONS]... [DOCUMENT-DIR]...
+
+Options:
+ -a, --all assume all subdirectories of index to be DOCUMENT-DIRs
+ -d, --dir=DIR set index directory to DIR (default=.)
+ -D, --debug print debugging info
+ -h, --help show this help text
+ -v, --version show version
+EOF
+}
+
+
+cleanup ()
+{
+ $debug_echo "cleaning ($?)..."
+}
+
+trap cleanup 0 9 15
+
+#
+# Find command line options and switches
+#
+
+# "x:" x takes argument
+#
+options="adhvW:"
+#
+# ugh, "\-" is a hack to support long options
+# must be in double quotes for bash-2.0
+
+while getopts "\-:$options" O
+do
+ $debug_echo "O: \`$O'"
+ $debug_echo "arg: \`$OPTARG'"
+ case $O in
+ a)
+ all=yes
+ ;;
+ D)
+ [ "$debug_echo" = "echo" ] && set -x
+ debug_echo=echo
+ ;;
+ h)
+ help;
+ exit 0
+ ;;
+ v)
+ echo $name $version
+ exit 0
+ ;;
+ d)
+ index_dir=$OPTARG
+ ;;
+ # a long option!
+ -)
+ case "$OPTARG" in
+ a*|-a*)
+ all=yes
+ ;;
+ de*|-de*)
+ [ "$debug_echo" = "echo" ] && set -x
+ debug_echo=echo
+ ;;
+ h*|-h*)
+ help;
+ exit 0
+ ;;
+ di*|-di*)
+ index_dir="`expr \"$OPTARG\" ':' '[^=]*=\(.*\)'`"
+ ;;
+ version|-version)
+ echo $name $version
+ exit 0
+ ;;
+ *|-*)
+ echo "$0: invalid option -- \"$OPTARG\""
+ help;
+ exit -1
+ ;;
+ esac
+ esac
+done
+shift `expr $OPTIND - 1`
+
+#
+# Input file name
+#
+if [ -z "$all" -a -z "$1" ]; then
+ help
+ echo "$name: No HTML documents given"
+ exit 2
+fi
+
+if [ -n "$all" -a -n "$1" ]; then
+ echo "$name: --all specified, ignoring DIRECTORY-DIRs"
+fi
+
+if [ -n "$all" ]; then
+ document_dirs=`/bin/ls -d1 $index_dir`
+else
+ document_dirs=$*
+fi
+
+index_file=$index_dir/index.html
+rm -f $index_file
+echo -n "$name: Writing index: $index_file..."
+
+# head
+cat >> $index_file <<EOF
+<html>
+<title>Info documentation index</title>
+<body>
+<h1>Info documentation index</h1>
+<p>
+This is the directory file \`index.html' a.k.a. \`DIR', which contains the
+topmost node of the HTML Info hierarchy.
+</p>
+<ul>
+EOF
+
+#list
+for i in $document_dirs; do
+ cat <<EOF
+<li> <a href="$i/index.html">$i</a> (<a href="$i.html">$i as one big page</a>)</li>
+EOF
+done >> $index_file
+
+# foot
+cat >> $index_file <<EOF
+</ul>
+</body>
+</html>
+EOF
+echo
--- /dev/null
+#!@PYTHON@
+
+# Created 01 September 2003 by Heikki Junes.
+# Rewritten by John Mandereau
+
+# Generates lilypond-words.el for (X)Emacs and lilypond-words[.vim] for Vim.
+
+import re
+import sys
+import os
+import getopt
+
+keywords = []
+reserved_words = []
+note_names = []
+
+# keywords not otherwise found
+keywords += ['include', 'maininput', 'version']
+
+# the main keywords
+s = open ('lily/lily-lexer.cc', 'r').read ()
+keywords += [w for w in re.findall (r"\s*{\"(.+)\",\s*.*},\s*\n", s)]
+
+s = open ('scm/markup.scm', 'r').read ()
+keywords += [w for w in re.findall (r"(?m)^\s*\(cons\s*([a-z-]+)-markup", s)]
+
+# identifiers and keywords
+for name in ['ly/chord-modifiers-init.ly',
+ 'ly/dynamic-scripts-init.ly',
+ 'ly/engraver-init.ly',
+ 'ly/grace-init.ly',
+ 'ly/gregorian.ly',
+ 'ly/music-functions-init.ly',
+ 'ly/performer-init.ly',
+ 'ly/property-init.ly',
+ 'ly/scale-definitions-init.ly',
+ 'ly/script-init.ly',
+ 'ly/spanners-init.ly',
+ 'ly/declarations-init.ly',
+ 'ly/params-init.ly']:
+ s = open (name, 'r').read ()
+ keywords += [w for w in re.findall (r"(?m)^\s*\"?([a-zA-Z]+)\"?\s*=", s)]
+
+# note names
+for name in ['ly/catalan.ly',
+ 'ly/deutsch.ly',
+ 'ly/drumpitch-init.ly',
+ 'ly/english.ly',
+ 'ly/espanol.ly',
+ 'ly/italiano.ly',
+ 'ly/nederlands.ly',
+ 'ly/norsk.ly',
+ 'ly/portugues.ly',
+ 'ly/suomi.ly',
+ 'ly/svenska.ly',
+ 'ly/vlaams.ly']:
+ s = open (name, 'r').read ()
+ note_names += [n for n in re.findall (r"(?m)^\s*\(([a-z]+)[^l]+ly:make-pitch", s)]
+
+# reserved words
+for name in ['ly/engraver-init.ly',
+ 'ly/performer-init.ly']:
+ s = open (name, 'r').read ()
+ for pattern in [r"(?m)^\s*.consists\s+\"([a-zA-Z_]+)\"",
+ r"[\\]name\s+[\"]?([a-zA-Z_]+)[\"]?",
+ r"\s+([a-zA-Z_]+)\s*\\(?:set|override)"]:
+ reserved_words += [w for w in re.findall (pattern, s)]
+
+keywords = list (set (keywords))
+keywords.sort (reverse=True)
+
+reserved_words = list (set (reserved_words))
+reserved_words.sort (reverse=True)
+
+note_names = list (set (note_names))
+note_names.sort (reverse=True)
+
+
+# output
+outdir = ''
+out_words = False
+out_el = False
+out_vim = False
+
+options = getopt.getopt (sys.argv[1:],
+ '', ['words', 'el', 'vim', 'dir='])[0]
+
+for (o, a) in options:
+ if o == '--words':
+ out_words = True
+ elif o == '--el':
+ out_el = True
+ elif o == '--vim':
+ out_vim = True
+ elif o == '--dir':
+ outdir = a
+
+if out_words or out_el:
+ outstring = ''.join (['\\\\' + w + '\n' for w in keywords])
+ outstring += ''.join ([w + '\n' for w in reserved_words])
+ outstring += ''.join ([w + '\n' for w in note_names])
+
+if out_words:
+ f = open (os.path.join (outdir, 'lilypond-words'), 'w')
+ f.write (outstring)
+
+if out_el:
+ f = open (os.path.join (outdir, 'lilypond-words.el'), 'w')
+ f.write (outstring)
+
+ # the menu in lilypond-mode.el
+ # for easier typing of this list, replace '/' with '\' below
+ # when writing to file
+ elisp_menu = ['/( - _ /) -',
+ '/[ - _ /] -',
+ '< - _ > -',
+ '<< - _ >> -',
+ '///( - _ ///) -',
+ '///[ - _ ///] -',
+ '///< - _ ///! -',
+ '///> - _ ///! -',
+ '//center - / << _ >> -',
+ '//column - / << _ >> -',
+ '//context/ Staff/ = - % { _ } -',
+ '//context/ Voice/ = - % { _ } -',
+ '//markup - { _ } -',
+ '//notes - { _ } -',
+ '//relative - % { _ } -',
+ '//score - { //n /? //simultaneous { //n _ //n } /! //n //paper { } //n /? //midi { } //n /! } //n -',
+ '//simultaneous - { _ } -',
+ '//sustainDown - _ //sustainUp -',
+ '//times - % { _ } -',
+ '//transpose - % { _ } -',
+ '']
+ f.write ('\n'.join ([line.replace ('/', '\\') for line in elisp_menu]))
+
+if out_vim:
+ f = open (os.path.join (outdir, 'lilypond-words.vim'), 'w')
+ f.write ('syn match lilyKeyword \"[-_^]\\?\\\\\\(')
+ f.write (''.join ([w + '\\|' for w in keywords]))
+ f.write ('n\\)\\(\\A\\|\\n\\)\"me=e-1\n')
+
+ f.write ('syn match lilyReservedWord \"\\(\\A\\|\\n\\)\\(')
+ f.write (''.join ([w + '\\|' for w in reserved_words]))
+ f.write ('Score\\)\\(\\A\\|\\n\\)\"ms=s+1,me=e-1\n')
+
+ f.write ('syn match lilyNote \"\\<\\(\\(\\(')
+ f.write (''.join ([w + '\\|' for w in note_names]))
+ f.write ('a\\)\\([,\']\\)\\{,4}\\([?!]\\)\\?\\)\\|s\\|r\\)\\(\\(128\\|64\\|32\\|16\\|8\\|4\\|2\\|1\\|\\\\breve\\|\\\\longa\\|\\\\maxima\\)[.]\\{,8}\\)\\?\\(\\A\\|\\n\\)\"me=e-1\n')
--- /dev/null
+#!@PYTHON@
+
+
+'''
+TODO:
+
+ * Add @nodes, split at sections?
+
+'''
+
+
+import sys
+import os
+import getopt
+import re
+
+program_name = 'lys-to-tely'
+
+include_snippets = '@lysnippets'
+fragment_options = 'printfilename,texidoc'
+help_text = r"""Usage: %(program_name)s [OPTIONS]... LY-FILE...
+Construct tely doc from LY-FILEs.
+
+Options:
+ -h, --help print this help
+ -f, --fragment-options=OPTIONS use OPTIONS as lilypond-book fragment
+ options
+ -o, --output=NAME write tely doc to NAME
+ -t, --title=TITLE set tely doc title TITLE
+ --template=TEMPLATE use TEMPLATE as Texinfo template file,
+ instead of standard template; TEMPLATE should contain a command
+ '%(include_snippets)s' to tell where to insert LY-FILEs. When this
+ option is used, NAME and TITLE are ignored.
+"""
+
+def help (text):
+ sys.stdout.write ( text)
+ sys.exit (0)
+
+(options, files) = getopt.getopt (sys.argv[1:], 'f:hn:t:',
+ ['fragment-options=', 'help', 'name=', 'title=', 'template='])
+
+name = "ly-doc"
+title = "Ly Doc"
+template = '''\input texinfo
+@setfilename %%(name)s.info
+@settitle %%(title)s
+
+@documentencoding utf-8
+@iftex
+@afourpaper
+@end iftex
+
+@finalout @c we do not want black boxes.
+
+@c fool ls-latex
+@ignore
+@author Han-Wen Nienhuys and Jan Nieuwenhuizen
+@title %%(title)s
+@end ignore
+
+@node Top, , , (dir)
+@top %%(title)s
+
+%s
+
+@bye
+''' % include_snippets
+
+for opt in options:
+ o = opt[0]
+ a = opt[1]
+ if o == '-h' or o == '--help':
+ # We can't use vars () inside a function, as that only contains all
+ # local variables and none of the global variables! Thus we have to
+ # generate the help text here and pass it to the function...
+ help (help_text % vars ())
+ elif o == '-n' or o == '--name':
+ name = a
+ elif o == '-t' or o == '--title':
+ title = a
+ elif o == '-f' or o == '--fragment-options':
+ fragment_options = a
+ elif o == '--template':
+ template = open (a, 'r').read ()
+ else:
+ raise Exception ('unknown option: ' + o)
+
+# Texinfo sources (.tely/.itely/.texi/.itexi) are @include'd verbatim;
+# anything else is treated as a LilyPond file.
+texi_file_re = re.compile ('.*\.i?te(ly|xi)$')
+
+def name2line (n):
+    # Return the Texinfo snippet that pulls input file `n` into the doc.
+    if texi_file_re.match (n):
+        # We have a texi include file, simply include it:
+        s = r"@include %s" % os.path.basename (n)
+    else:
+        # Assume it's a lilypond file -> create image etc.
+        # The <a name=...> anchor lets HTML output deep-link each snippet.
+        s = r"""
+@ifhtml
+@html
+<a name="%s"></a>
+@end html
+@end ifhtml
+
+@lilypondfile[%s]{%s}
+""" % (os.path.basename (n), fragment_options, n)
+    return s
+
+if files:
+    # `name` may carry a directory part; write the output there.
+    dir = os.path.dirname (name) or "."
+# don't strip .tely extension, input/lsr uses .itely
+    name = os.path.basename (name)
+    template = template % vars ()
+
+    # One @include/@lilypondfile line per input file, spliced into the
+    # template at the first occurrence of the @lysnippets placeholder.
+    s = "\n".join (map (name2line, files))
+    s = template.replace (include_snippets, s, 1)
+    f = "%s/%s" % (dir, name)
+    sys.stderr.write ("%s: writing %s..." % (program_name, f))
+    h = open (f, "w")
+    h.write (s)
+    h.close ()
+    sys.stderr.write ('\n')
+else:
+    # not Unix philosophy, but hey, at least we notice when
+    # we don't distribute any .ly files.
+    sys.stderr.write ("No files specified. Doing nothing")
--- /dev/null
+#!@PYTHON@
+# mass-link.py
+
+# USAGE: mass-link.py [--prepend-suffix SUFFIX] symbolic | hard SOURCEDIR DESTDIR FILES
+#
+# create hard or symbolic links to SOURCEDIR/FILES in DESTDIR
+#
+# If --prepend-suffix is specified, link to foo.bar will be called fooSUFFIX.bar.
+# Shell wildcards expansion is performed on FILES.
+
+import sys
+import os
+import glob
+import getopt
+
+print "mass-link.py"
+
+optlist, args = getopt.getopt (sys.argv[1:], '', ['prepend-suffix='])
+# Positional arguments: link type ('symbolic'|'hard'), source tree,
+# destination tree, then any number of glob patterns.
+link_type, source_dir, dest_dir = args[0:3]
+files = args[3:]
+
+source_dir = os.path.normpath (source_dir)
+dest_dir = os.path.normpath (dest_dir)
+
+# Optional suffix inserted before the extension of every link name.
+prepended_suffix = ''
+for x in optlist:
+    if x[0] == '--prepend-suffix':
+        prepended_suffix = x[1]
+
+if prepended_suffix:
+    def insert_suffix (p):
+        # foo.bar -> fooSUFFIX.bar; with no dot, simply append SUFFIX.
+        l = p.split ('.')
+        if len (l) >= 2:
+            l[-2] += prepended_suffix
+            return '.'.join (l)
+        return p + prepended_suffix
+else:
+    # No suffix requested: identity mapping.
+    insert_suffix = lambda p: p
+
+# Select the linking primitive up front; fail early on a bad link type.
+if link_type == 'symbolic':
+    link = os.symlink
+elif link_type == 'hard':
+    link = os.link
+else:
+    sys.stderr.write(sys.argv[0] + ': ' + link_type + ": wrong argument, expected 'symbolic' or 'hard'\n")
+    sys.exit (1)
+
+# Expand the glob patterns relative to the source tree.
+sourcefiles = []
+for pattern in files:
+    sourcefiles += (glob.glob (os.path.join (source_dir, pattern)))
+
+def relative_path (f):
+    # Strip the leading "SOURCEDIR/" so the path can be re-rooted.
+    if source_dir == '.':
+        return f
+    return f[len (source_dir) + 1:]
+
+destfiles = [os.path.join (dest_dir, insert_suffix (relative_path (f))) for f in sourcefiles]
+
+# Create any missing destination directories before linking.
+destdirs = set ([os.path.dirname (dest) for dest in destfiles])
+[os.makedirs (d) for d in destdirs if not os.path.exists (d)]
+
+def force_link (src,dest):
+ if os.path.exists (dest):
+ os.system ('rm -f ' + dest)
+ link (src, dest)
+
+map (force_link, sourcefiles, destfiles)
--- /dev/null
+#!@PYTHON@
+
+# mf-to-table.py -- convert spacing info in MF logs .
+#
+# source file of the GNU LilyPond music typesetter
+#
+# (c) 1997--2008 Han-Wen Nienhuys <hanwen@cs.uu.nl>
+
+import os
+import sys
+import getopt
+import re
+import time
+
+def read_log_file (fn):
+    # Parse a Metafont log file.  Returns (autolines, deps):
+    # `autolines` — strings bracketed by @{ ... @} in the log,
+    # `deps`      — the .mf files the run read (for make dependencies).
+    str = open (fn).read ()
+    # Metafont wraps log lines arbitrarily; flatten everything onto one
+    # line and collapse whitespace before matching.
+    str = re.sub ('\n', '', str)
+    str = re.sub ('[\t ]+', ' ', str)
+
+    deps = []
+    autolines = []
+    # Default arguments bind the result lists into the callbacks.
+    def include_func (match, d = deps):
+        d.append (match.group (1))
+        return ''
+
+    def auto_func (match, a = autolines):
+        a.append (match.group (1))
+        return ''
+
+    # "(foo.mf" marks a file being opened; @{...@} marks a feta record.
+    str = re.sub ('\\(([/a-z.A-Z_0-9-]+\\.mf)', include_func, str)
+    str = re.sub ('@{(.*?)@}', auto_func, str)
+
+    return (autolines, deps)
+
+
+class Char_metric:
+    # Placeholder: character metrics are currently plain dicts built in
+    # parse_logfile, so this class is unused.
+    def __init__ (self):
+        pass
+
+font_family = 'feta'
+
+def parse_logfile (fn):
+ autolines, deps = read_log_file (fn)
+ charmetrics = []
+
+ global_info = {
+ 'filename' : os.path.splitext (os.path.basename (fn))[0]
+ }
+ group = ''
+
+ for l in autolines:
+ tags = l.split ('@:')
+ if tags[0] == 'group':
+ group = tags[1]
+ elif tags[0] == 'puorg':
+ group = ''
+ elif tags[0] == 'char':
+ name = tags[9]
+
+ if group:
+ name = group + '.' + name
+ m = {
+ 'description': tags[1],
+ 'name': name,
+ 'code': int (tags[2]),
+ 'breapth': float (tags[3]),
+ 'width': float (tags[4]),
+ 'depth': float (tags[5]),
+ 'height': float (tags[6]),
+ 'wx': float (tags[7]),
+ 'wy': float (tags[8]),
+ }
+ charmetrics.append (m)
+ elif tags[0] == 'font':
+ global font_family
+ font_family = (tags[3])
+ # To omit 'GNU' (foundry) from font name proper:
+ # name = tags[2:]
+ #urg
+ if 0: # testing
+ tags.append ('Regular')
+
+ encoding = re.sub (' ','-', tags[5])
+ tags = tags[:-1]
+ name = tags[1:]
+ global_info['design_size'] = float (tags[4])
+ global_info['FontName'] = '-'.join (name)
+ global_info['FullName'] = ' '.join (name)
+ global_info['FamilyName'] = '-'.join (name[1:-1])
+ if 1:
+ global_info['Weight'] = tags[4]
+ else: # testing
+ global_info['Weight'] = tags[-1]
+
+ global_info['FontBBox'] = '0 0 1000 1000'
+ global_info['Ascender'] = '0'
+ global_info['Descender'] = '0'
+ global_info['EncodingScheme'] = encoding
+
+ elif tags[0] == 'parameter':
+ global_info[tags[1]] = tags[2];
+
+ return (global_info, charmetrics, deps)
+
+
+
+def character_lisp_table (global_info, charmetrics):
+
+ def conv_char_metric (charmetric):
+ f = 1.0
+ s = """(%s .
+((bbox . (%f %f %f %f))
+(subfont . "%s")
+(subfont-index . %d)
+(attachment . (%f . %f))))
+""" %(charmetric['name'],
+ -charmetric['breapth'] * f,
+ -charmetric['depth'] * f,
+ charmetric['width'] * f,
+ charmetric['height'] * f,
+ global_info['filename'],
+ charmetric['code'],
+ charmetric['wx'],
+ charmetric['wy'])
+
+ return s
+
+ s = ''
+ for c in charmetrics:
+ s += conv_char_metric (c)
+
+ return s
+
+
+def global_lisp_table (global_info):
+ str = ''
+
+ keys = ['staffsize', 'stafflinethickness', 'staff_space',
+ 'linethickness', 'black_notehead_width', 'ledgerlinethickness',
+ 'design_size',
+ 'blot_diameter'
+ ]
+ for k in keys:
+ if global_info.has_key (k):
+ str = str + "(%s . %s)\n" % (k,global_info[k])
+
+ return str
+
+
+def ps_encoding (name, global_info, charmetrics):
+ encs = ['.notdef'] * 256
+ for m in charmetrics:
+ encs[m['code']] = m['name']
+
+
+ s = ('/%s [\n' % name)
+ for m in range (0, 256):
+ s += (' /%s %% %d\n' % (encs[m], m))
+ s += ('] def\n')
+ return s
+
+def get_deps (deps, targets):
+ s = ''
+ for t in targets:
+ t = re.sub ( '^\\./', '', t)
+ s += ('%s '% t)
+ s += (": ")
+ for d in deps:
+ s += ('%s ' % d)
+ s += ('\n')
+ return s
+
+def help ():
+    # Print usage and exit successfully.
+    sys.stdout.write(r"""Usage: mf-to-table [OPTIONS] LOGFILEs
+
+Generate feta metrics table from preparated feta log.
+
+Options:
+ -d, --dep=FILE         print dependency info to FILE
+ -h, --help             print this help
+ -l, --ly=FILE          name output table
+ -o, --outdir=DIR       prefix for dependency info
+ -p, --package=DIR      specify package
+
+ """)
+    sys.exit (0)
+
+
+(options, files) = \
+ getopt.getopt (sys.argv[1:],
+ 'a:d:ho:p:t:',
+ ['enc=', 'outdir=', 'dep=', 'lisp=',
+ 'global-lisp=',
+ 'debug', 'help', 'package='])
+
+global_lisp_nm = ''
+char_lisp_nm = ''
+enc_nm = ''
+depfile_nm = ''
+lyfile_nm = ''
+outdir_prefix = '.'
+
+for opt in options:
+ o = opt[0]
+ a = opt[1]
+ if o == '--dep' or o == '-d':
+ depfile_nm = a
+ elif o == '--outdir' or o == '-o':
+ outdir_prefix = a
+ elif o == '--lisp':
+ char_lisp_nm = a
+ elif o == '--global-lisp':
+ global_lisp_nm = a
+ elif o == '--enc':
+ enc_nm = a
+ elif o== '--help' or o == '-h':
+ help()
+ elif o == '--debug':
+ debug_b = 1
+ else:
+ print o
+ raise getopt.error
+
+# Base name for dependency targets (empty unless --ly is usable).
+base = os.path.splitext (lyfile_nm)[0]
+
+for filenm in files:
+    (g, m, deps) = parse_logfile (filenm)
+
+    # Pick the PostScript encoding name from the log file's name.
+    enc_name = 'FetaEncoding'
+    if re.search ('parmesan', filenm):
+        enc_name = 'ParmesanEncoding'
+    elif re.search ('feta-brace', filenm):
+        enc_name = 'FetaBraceEncoding'
+    elif re.search ('feta-alphabet', filenm):
+        enc_name = 'FetaAlphabetEncoding';
+
+    # Write the encoding vector, per-char metrics and global metrics.
+    open (enc_nm, 'w').write (ps_encoding (enc_name, g, m))
+    open (char_lisp_nm, 'w').write (character_lisp_table (g, m))
+    open (global_lisp_nm, 'w').write (global_lisp_table (g))
+    if depfile_nm:
+        open (depfile_nm, 'wb').write (get_deps (deps,
+                                                 [base + '.log', base + '.dvi', base + '.pfa',
+                                                  depfile_nm,
+                                                  base + '.pfb']))
--- /dev/null
+#!@PERL@
+
+##################################################
+# Convert stylized Metafont to PostScript Type 1 #
+# By Scott Pakin <scott+mf@pakin.org> #
+##################################################
+
+########################################################################
+# mf2pt1 #
+# Copyright (C) 2008 Scott Pakin #
+# #
+# This program may be distributed and/or modified under the conditions #
+# of the LaTeX Project Public License, either version 1.3c of this #
+# license or (at your option) any later version. #
+# #
+# The latest version of this license is in: #
+# #
+# http://www.latex-project.org/lppl.txt #
+# #
+# and version 1.3c or later is part of all distributions of LaTeX #
+# version 2006/05/20 or later. #
+########################################################################
+
+our $VERSION = "2.4.4"; # mf2pt1 version number
+require 5.6.1; # I haven't tested mf2pt1 with older Perl versions
+
+use File::Basename;
+use File::Spec;
+use Getopt::Long;
+use Pod::Usage;
+use Math::Trig;
+use warnings;
+use strict;
+
+# Define some common encoding vectors.
+my @standardencoding =
+ ((map {"_a$_"} (0..31)),
+ qw (space exclam quotedbl numbersign dollar percent ampersand
+ quoteright parenleft parenright asterisk plus comma hyphen
+ period slash zero one two three four five six seven eight
+ nine colon semicolon less equal greater question at A B C D E
+ F G H I J K L M N O P Q R S T U V W X Y Z bracketleft
+ backslash bracketright asciicircum underscore quoteleft a b c
+ d e f g h i j k l m n o p q r s t u v w x y z braceleft bar
+ braceright asciitilde),
+ (map {"_a$_"} (127..160)),
+ qw (exclamdown cent sterling fraction yen florin section currency
+ quotesingle quotedblleft guillemotleft guilsinglleft
+ guilsinglright fi fl _a176 endash dagger daggerdbl
+ periodcentered _a181 paragraph bullet quotesinglbase
+ quotedblbase quotedblright guillemotright ellipsis
+ perthousand _a190 questiondown _a192 grave acute circumflex
+ tilde macron breve dotaccent dieresis _a201 ring cedilla
+ _a204 hungarumlaut ogonek caron emdash),
+ (map {"_a$_"} (209..224)),
+ qw (AE _a226 ordfeminine _a228 _a229 _a230 _a231 Lslash Oslash OE
+ ordmasculine _a236 _a237 _a238 _a239 _a240 ae _a242 _a243
+ _a244 dotlessi _a246 _a247 lslash oslash oe germandbls _a252
+ _a253 _a254 _a255));
+my @isolatin1encoding =
+ ((map {"_a$_"} (0..31)),
+ qw (space exclam quotedbl numbersign dollar percent ampersand
+ quoteright parenleft parenright asterisk plus comma minus
+ period slash zero one two three four five six seven eight
+ nine colon semicolon less equal greater question at A B C D E
+ F G H I J K L M N O P Q R S T U V W X Y Z bracketleft
+ backslash bracketright asciicircum underscore quoteleft a b c
+ d e f g h i j k l m n o p q r s t u v w x y z braceleft bar
+ braceright asciitilde),
+ (map {"_a$_"} (128..143)),
+ qw (dotlessi grave acute circumflex tilde macron breve dotaccent
+ dieresis _a153 ring cedilla _a156 hungarumlaut ogonek
+ caron space exclamdown cent sterling currency yen brokenbar
+ section dieresis copyright ordfeminine guillemotleft
+ logicalnot hyphen registered macron degree plusminus
+ twosuperior threesuperior acute mu paragraph periodcentered
+ cedilla onesuperior ordmasculine guillemotright onequarter
+ onehalf threequarters questiondown Agrave Aacute Acircumflex
+ Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex
+ Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde
+ Ograve Oacute Ocircumflex Otilde Odieresis multiply Oslash
+ Ugrave Uacute Ucircumflex Udieresis Yacute Thorn germandbls
+ agrave aacute acircumflex atilde adieresis aring ae ccedilla
+ egrave eacute ecircumflex edieresis igrave iacute icircumflex
+ idieresis eth ntilde ograve oacute ocircumflex otilde
+ odieresis divide oslash ugrave uacute ucircumflex udieresis
+ yacute thorn ydieresis));
+my @ot1encoding =
+ qw (Gamma Delta Theta Lambda Xi Pi Sigma Upsilon Phi
+ Psi Omega ff fi fl ffi ffl dotlessi dotlessj grave acute caron
+ breve macron ring cedilla germandbls ae oe oslash AE OE Oslash
+ suppress exclam quotedblright numbersign dollar percent
+ ampersand quoteright parenleft parenright asterisk plus comma
+ hyphen period slash zero one two three four five six seven
+ eight nine colon semicolon exclamdown equal questiondown
+ question at A B C D E F G H I J K L M N O P Q R S T U V W X Y
+ Z bracketleft quotedblleft bracketright circumflex dotaccent
+ quoteleft a b c d e f g h i j k l m n o p q r s t u v w x y z
+ endash emdash hungarumlaut tilde dieresis);
+my @t1encoding =
+ qw (grave acute circumflex tilde dieresis hungarumlaut ring caron
+ breve macron dotaccent cedilla ogonek quotesinglbase
+ guilsinglleft guilsinglright quotedblleft quotedblright
+ quotedblbase guillemotleft guillemotright endash emdash cwm
+ perthousand dotlessi dotlessj ff fi fl ffi ffl space exclam
+ quotedbl numbersign dollar percent ampersand quoteright
+ parenleft parenright asterisk plus comma hyphen period slash
+ zero one two three four five six seven eight nine colon
+ semicolon less equal greater question at A B C D E F G H I J K L
+ M N O P Q R S T U V W X Y Z bracketleft backslash bracketright
+ asciicircum underscore quoteleft a b c d e f g h i j k l m n o p
+ q r s t u v w x y z braceleft bar braceright asciitilde
+ sfthyphen Abreve Aogonek Cacute Ccaron Dcaron Ecaron Eogonek
+ Gbreve Lacute Lcaron Lslash Nacute Ncaron Eng Ohungarumlaut
+ Racute Rcaron Sacute Scaron Scedilla Tcaron Tcedilla
+ Uhungarumlaut Uring Ydieresis Zacute Zcaron Zdotaccent IJ
+ Idotaccent dcroat section abreve aogonek cacute ccaron dcaron
+ ecaron eogonek gbreve lacute lcaron lslash nacute ncaron eng
+ ohungarumlaut racute rcaron sacute scaron scedilla tcaron
+ tcedilla uhungarumlaut uring ydieresis zacute zcaron zdotaccent
+ ij exclamdown questiondown sterling Agrave Aacute Acircumflex
+ Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex
+ Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde Ograve
+ Oacute Ocircumflex Otilde Odieresis OE Oslash Ugrave Uacute
+ Ucircumflex Udieresis Yacute Thorn SS agrave aacute acircumflex
+ atilde adieresis aring ae ccedilla egrave eacute ecircumflex
+ edieresis igrave iacute icircumflex idieresis eth ntilde ograve
+ oacute ocircumflex otilde odieresis oe oslash ugrave uacute
+ ucircumflex udieresis yacute thorn germandbls);
+
+# Define font parameters that the user can override.
+my $fontversion;
+my $creationdate;
+my $comment;
+my $familyname;
+my $weight;
+my $fullname;
+my $fixedpitch;
+my $italicangle;
+my $underlinepos;
+my $underlinethick;
+my $fontname;
+my $uniqueID;
+my $designsize;
+my ($mffile, $pt1file, $pfbfile, $ffscript);
+my $encoding;
+my $rounding;
+my $bpppix;
+
+# Define all of our other global variables.
+my $progname = basename $0, ".pl";
+my $mag;
+my @fontbbox;
+my @charbbox;
+my @charwd;
+my @glyphname;
+my @charfiles;
+my $filebase;
+my $filedir;
+my $filenoext;
+my $versionmsg = "mf2pt1 version $VERSION
+
+Copyright (C) 2008 Scott Pakin
+
+This program may be distributed and/or modified under the conditions
+of the LaTeX Project Public License, either version 1.3c of this
+license or (at your option) any later version.
+
+The latest version of this license is in:
+
+ http://www.latex-project.org/lppl.txt
+
+and version 1.3c or later is part of all distributions of LaTeX
+version 2006/05/20 or later.
+";
+
+
+######################################################################
+
+# The routines to compute the fractional approximation of a real number
+# are heavily based on code posted by Ben Tilly
+# <http://www.perlmonks.org/?node_id=26179> on Nov 16th, 2000, to the
+# PerlMonks list. See <http://www.perlmonks.org/index.pl?node_id=41961>.
+
+
+# Takes numerator/denominator pairs.
+# Returns a PS fraction string representation (with a trailing space).
+# Takes numerator/denominator pairs.
+# Returns a PS fraction string representation (with a trailing space).
+sub frac_string (@)
+{
+    # "N " when the denominator is 1, otherwise "N D div ".
+    my $res = "";
+    while (@_) {
+        my ($n, $d) = (shift, shift);
+        $res .= "$n ";
+        $res .= "$d div " if $d > 1;
+    }
+    return $res;
+}
+
+
+# Takes a number.
+# Returns a numerator and denominator with the smallest denominator
+# so that the difference of the resulting fraction to the number is
+# smaller or equal to $rounding.
+# Takes a number.
+# Returns a numerator and denominator with the smallest denominator
+# so that the difference of the resulting fraction to the number is
+# smaller or equal to $rounding.
+sub frac_approx ($)
+{
+    # Walk the continued-fraction convergents of $num and return the
+    # first one within $rounding of the target.
+    my $num = shift;
+    my $next_convergent = ret_frac_iter ($num);
+
+    while (1) {
+        my ($n, $m) = $next_convergent->();
+        return ($n, $m) if abs ($num - $n / $m) <= $rounding;
+    }
+}
+
+
+# Takes a number, returns the best integer approximation and (in list
+# context) the error.
+# Takes a number, returns the best integer approximation and (in list
+# context) the error.
+sub best_int ($)
+{
+    my $x = shift;
+    my $nearest = sprintf '%.0f', $x;
+    return wantarray ? ($nearest, $x - $nearest) : $nearest;
+}
+
+
+# Takes a numerator and denominator, in scalar context returns
+# the best fraction describing them, in list the numerator and
+# denominator.
+# Takes a numerator and denominator, in scalar context returns
+# the best fraction describing them, in list the numerator and
+# denominator.
+sub frac_standard ($$)
+{
+    # Normalize: round both parts to integers, reduce by the gcd,
+    # and keep the denominator positive.
+    my $n = best_int (shift);
+    my $m = best_int (shift);
+    my $k = gcd ($n, $m);
+    $n /= $k;
+    $m /= $k;
+    ($n, $m) = (-$n, -$m) if $m < 0;
+    return wantarray ? ($n, $m) : "$n/$m";
+}
+
+
+# Euclidean algorithm for calculating a GCD.
+# Takes two integers, returns the greatest common divisor.
+# Euclidean algorithm for calculating a GCD.
+# Takes two integers, returns the greatest common divisor.
+sub gcd ($$)
+{
+    my ($a, $b) = @_;
+    ($a, $b) = ($b, $a % $b) while $b;
+    return $a;
+}
+
+
+# Takes a list of terms in a continued fraction, and converts it
+# into a fraction.
+# Takes a list of terms in a continued fraction, and converts it
+# into a fraction.
+sub ints_to_frac (@)
+{
+    # Collapse [k0; k1, k2, ...] from the innermost term outward.
+    my ($n, $m) = (0, 1);   # Start from zero.
+    while (@_) {
+        my $k = pop;
+        if ($n) {
+            # k + 1/(n/m)  ==  (k*n + m) / n
+            ($n, $m) = frac_standard ($k * $n + $m, $n);
+        }
+        else {
+            # Innermost term: just k itself.
+            ($n, $m) = frac_standard ($k, 1);
+        }
+    }
+    return frac_standard ($n, $m);
+}
+
+
+# Takes a number, returns an anon sub which iterates through a set of
+# fractional approximations that converges very quickly to the number.
+# Takes a number, returns an anon sub which iterates through a set of
+# fractional approximations that converges very quickly to the number.
+sub ret_frac_iter ($)
+{
+    my $x = shift;
+    my $term_iter = ret_next_term_iter ($x);
+    my @terms;   # Continued-fraction terms collected so far.
+    return sub {
+        push @terms, $term_iter->();
+        return ints_to_frac (@terms);
+    }
+}
+
+
+# Terms of a continued fraction converging on that number.
+# Terms of a continued fraction converging on that number.
+sub ret_next_term_iter ($)
+{
+    my $x = shift;
+    return sub {
+        # Peel off the integer part; continue with the reciprocal
+        # of the remainder (unless it is exactly zero).
+        (my $n, $x) = best_int ($x);
+        $x = 1 / $x if 0 != $x;
+        return $n;
+    }
+}
+
+######################################################################
+
+# Round a number to the nearest integer (half away from zero).
+sub round ($)
+{
+    my $x = shift;
+    return int ($x + 0.5 * ($x <=> 0));
+}
+
+
+# Round a number to a given precision.
+# (Snaps to the nearest multiple of the global $rounding step.)
+sub prec ($)
+{
+    return round ($_[0] / $rounding) * $rounding;
+}
+
+
+# Set a variable's value to the first defined value in the given list.
+# If the variable was not previously defined and no value in the list
+# is defined, do nothing.
+# Set a variable's value to the first defined value in the given list.
+# If the variable was not previously defined and no value in the list
+# is defined, do nothing.
+sub assign_default (\$@)
+{
+    # The \$ prototype passes the variable by reference.
+    my $varptr = shift;
+    # Keep an existing real value ("UNSPECIFIED" counts as unset).
+    return if defined $$varptr && $$varptr ne "UNSPECIFIED";
+    foreach my $candidate (@_) {
+        if (defined $candidate) {
+            $$varptr = $candidate;
+            return;
+        }
+    }
+}
+
+
+# Print and execute a shell command. An environment variable with the
+# same name as the command overrides the command name. Return 1 on
+# success, 0 on failure. Optionally abort if the command fails, based
+# on the first argument to execute_command.
+# Print and execute a shell command.  An environment variable with the
+# same name as the command overrides the command name.  Return 1 on
+# success, 0 on failure.  Optionally abort if the command fails, based
+# on the first argument to execute_command.
+sub execute_command ($@)
+{
+    my $abort_on_failure = shift;
+    my @command = @_;
+    # E.g. $ENV{MPOST} overrides the "mpost" executable.
+    $command[0] = $ENV{uc $command[0]} || $command[0];
+    # Quote arguments containing spaces or backslashes for display.
+    my $prettyargs = join (" ", map {/[\\ ]/ ? "'$_'" : $_} @command);
+    print "Invoking \"$prettyargs\"...\n";
+    my $status = system @command;
+    die "${progname}: \"$prettyargs\" failed ($!)\n" if $status && $abort_on_failure;
+    return !$status;
+}
+
+
+# Output the font header.
+sub output_header ()
+{
+    # Show the initial boilerplate.
+    # NOTE(review): interpolated variables ($fontname, $fontversion,
+    # ...) are assumed to have been assigned by assign_default before
+    # this is called — confirm against the driver code.
+    print OUTFILE <<"ENDHEADER";
+%!FontType1-1.0: $fontname $fontversion
+%%CreationDate: $creationdate
+% Font converted to Type 1 by mf2pt1, written by Scott Pakin.
+11 dict begin
+/FontInfo 11 dict dup begin
+/version ($fontversion) readonly def
+/Notice ($comment) readonly def
+/FullName ($fullname) readonly def
+/FamilyName ($familyname) readonly def
+/Weight ($weight) readonly def
+/ItalicAngle $italicangle def
+/isFixedPitch $fixedpitch def
+/UnderlinePosition $underlinepos def
+/UnderlineThickness $underlinethick def
+end readonly def
+/FontName /$fontname def
+ENDHEADER
+
+    # If we're not using an encoding that PostScript knows about, then
+    # create an encoding vector.
+    # ($encoding holds an array reference here; the == compares it
+    # against the built-in table's reference.)
+    if ($encoding==\@standardencoding) {
+        print OUTFILE "/Encoding StandardEncoding def\n";
+    }
+    else {
+        # Initialize all 256 slots to /.notdef, then fill in the named
+        # glyphs; "_aNNN" placeholders stay unencoded.
+        print OUTFILE "/Encoding 256 array\n";
+        print OUTFILE "0 1 255 {1 index exch /.notdef put} for\n";
+        foreach my $charnum (0 .. $#{$encoding}) {
+            if ($encoding->[$charnum] && $encoding->[$charnum]!~/^_a\d+$/) {
+                print OUTFILE "dup $charnum /$encoding->[$charnum] put\n";
+            }
+        }
+        print OUTFILE "readonly def\n";
+    }
+
+    # Show the final boilerplate.
+    print OUTFILE <<"ENDHEADER";
+/PaintType 0 def
+/FontType 1 def
+/FontMatrix [0.001 0 0 0.001 0 0] readonly def
+/UniqueID $uniqueID def
+/FontBBox{@fontbbox}readonly def
+currentdict end
+currentfile eexec
+dup /Private 5 dict dup begin
+/RD{string currentfile exch readstring pop}executeonly def
+/ND{noaccess def}executeonly def
+/NP{noaccess put}executeonly def
+ENDHEADER
+}
+
+
+# Use MetaPost to generate one PostScript file per character. We
+# calculate the font bounding box from these characters and store them
+# in @fontbbox. If the input parameter is 1, set other font
+# parameters, too.
+sub get_bboxes ($)
+{
+ execute_command 1, ("mpost", "-mem=mf2pt1", "-progname=mpost",
+ "\\mode:=localfont; mag:=$mag; bpppix $bpppix; input $mffile");
+ opendir (CURDIR, ".") || die "${progname}: $! ($filedir)\n";
+ @charfiles = sort
+ { ($a=~ /\.(\d+)$/)[0] <=> ($b=~ /\.(\d+)$/)[0] }
+ grep /^$filebase.*\.\d+$/, readdir(CURDIR);
+ close CURDIR;
+ @fontbbox = (1000000, 1000000, -1000000, -1000000);
+ foreach my $psfile (@charfiles) {
+ # Read the character number from the output file's extension.
+ $psfile =~ /\.(\d+)$/;
+ my $charnum = $1;
+
+ # Process in turn each line of the current PostScript file.
+ my $havebbox = 0;
+ open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n";
+ while (<PSFILE>) {
+ my @tokens = split " ";
+ if ($tokens[0] eq "%%BoundingBox:") {
+ # Store the MetaPost-produced bounding box, just in case
+ # the given font doesn't use beginchar.
+ @tokens = ("%", "MF2PT1:", "glyph_dimensions", @tokens[1..4]);
+ $havebbox--;
+ }
+ next if $#tokens<1 || $tokens[1] ne "MF2PT1:";
+
+ # Process a "special" inserted into the generated PostScript.
+ MF2PT1_CMD:
+ {
+ # glyph_dimensions llx lly urx ury -- specified glyph dimensions
+ $tokens[2] eq "glyph_dimensions" && do {
+ my @bbox = @tokens[3..6];
+ $fontbbox[0]=$bbox[0] if $bbox[0]<$fontbbox[0];
+ $fontbbox[1]=$bbox[1] if $bbox[1]<$fontbbox[1];
+ $fontbbox[2]=$bbox[2] if $bbox[2]>$fontbbox[2];
+ $fontbbox[3]=$bbox[3] if $bbox[3]>$fontbbox[3];
+ $charbbox[$charnum] = \@bbox;
+ $havebbox++;
+ last MF2PT1_CMD;
+ };
+
+ # If all we want is the bounding box, exit the loop now.
+ last MF2PT1_CMD if !$_[0];
+
+ # glyph_name name -- glyph name
+ $tokens[2] eq "glyph_name" && do {
+ $glyphname[$charnum] = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # charwd wd -- character width as in TFM
+ $tokens[2] eq "charwd" && do {
+ $charwd[$charnum] = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_identifier name -- full font name
+ $tokens[2] eq "font_identifier" && do {
+ $fullname = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_size number -- font design size (pt, not bp)
+ $tokens[2] eq "font_size" && $tokens[3] && do {
+ $designsize = $tokens[3] * 72 / 72.27;
+ last MF2PT1_CMD;
+ };
+
+ # font_slant number -- italic amount
+ $tokens[2] eq "font_slant" && do {
+ $italicangle = 0 + rad2deg (atan(-$tokens[3]));
+ last MF2PT1_CMD;
+ };
+
+ # font_coding_scheme string -- font encoding
+ $tokens[2] eq "font_coding_scheme" && do {
+ $encoding = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_version string -- font version number (xxx.yyy)
+ $tokens[2] eq "font_version" && do {
+ $fontversion = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_comment string -- font comment notice
+ $tokens[2] eq "font_comment" && do {
+ $comment = join (" ", @tokens[3..$#tokens]);
+ last MF2PT1_CMD;
+ };
+
+ # font_family string -- font family name
+ $tokens[2] eq "font_family" && do {
+ $familyname = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_weight string -- font weight (e.g., "Book" or "Heavy")
+ $tokens[2] eq "font_weight" && do {
+ $weight = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_fixed_pitch number -- fixed width font (0=false, 1=true)
+ $tokens[2] eq "font_fixed_pitch" && do {
+ $fixedpitch = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_underline_position number -- vertical underline position
+ $tokens[2] eq "font_underline_position" && do {
+ # We store $underlinepos in points and later
+ # scale it by 1000/$designsize.
+ $underlinepos = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_underline_thickness number -- thickness of underline
+ $tokens[2] eq "font_underline_thickness" && do {
+ # We store $underlinethick in points and later
+ # scale it by 1000/$designsize.
+ $underlinethick = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_name string -- font name
+ $tokens[2] eq "font_name" && do {
+ $fontname = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_unique_id number (as string) -- globally unique font ID
+ $tokens[2] eq "font_unique_id" && do {
+ $uniqueID = 0+$tokens[3];
+ last MF2PT1_CMD;
+ };
+ }
+ }
+ close PSFILE;
+ if (!$havebbox) {
+ warn "${progname}: No beginchar in character $charnum; glyph dimensions are probably incorrect\n";
+ }
+ }
+}
+
+
+# Convert ordinary, MetaPost-produced PostScript files into Type 1
+# font programs.
+sub output_font_programs ()
+{
+ # Iterate over all the characters. We convert each one, line by
+ # line and token by token.
+ print "Converting PostScript graphics to Type 1 font programs...\n";
+ foreach my $psfile (@charfiles) {
+ # Initialize the font program.
+ $psfile =~ /\.(\d+)$/;
+ my $charnum = $1;
+ my $gname = $glyphname[$charnum] || $encoding->[$charnum];
+ my @fontprog;
+ push @fontprog, ("/$gname {",
+ frac_string (frac_approx ($charbbox[$charnum]->[0]),
+ frac_approx ($charwd[$charnum] * $mag))
+ . "hsbw");
+ my ($cpx, $cpy) =
+ ($charbbox[$charnum]->[0], 0); # Current point (PostScript)
+
+ # Iterate over every line in the current file.
+ open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n";
+ while (my $oneline=<PSFILE>) {
+ next if $oneline=~/^\%/;
+ next if $oneline=~/set/; # Fortunately, "set" never occurs on "good" lines.
+ my @arglist; # Arguments to current PostScript function
+
+ # Iterate over every token in the current line.
+ TOKENLOOP:
+ foreach my $token (split " ", $oneline) {
+ # Number: Round and push on the argument list.
+ $token =~ /^[-.\d]+$/ && do {
+ push @arglist, prec ($&);
+ next TOKENLOOP;
+ };
+
+ # curveto: Convert to vhcurveto, hvcurveto, or rrcurveto.
+ $token eq "curveto" && do {
+ my ($dx1, $dy1) = ($arglist[0] - $cpx,
+ $arglist[1] - $cpy);
+ my ($dx1n, $dx1d) = frac_approx ($dx1);
+ my ($dy1n, $dy1d) = frac_approx ($dy1);
+ $cpx += $dx1n / $dx1d;
+ $cpy += $dy1n / $dy1d;
+
+ my ($dx2, $dy2) = ($arglist[2] - $cpx,
+ $arglist[3] - $cpy);
+ my ($dx2n, $dx2d) = frac_approx ($dx2);
+ my ($dy2n, $dy2d) = frac_approx ($dy2);
+ $cpx += $dx2n / $dx2d;
+ $cpy += $dy2n / $dy2d;
+
+ my ($dx3, $dy3) = ($arglist[4] - $cpx,
+ $arglist[5] - $cpy);
+ my ($dx3n, $dx3d) = frac_approx ($dx3);
+ my ($dy3n, $dy3d) = frac_approx ($dy3);
+ $cpx += $dx3n / $dx3d;
+ $cpy += $dy3n / $dy3d;
+
+ if (!$dx1n && !$dy3n) {
+ push @fontprog, frac_string ($dy1n, $dy1d,
+ $dx2n, $dx2d,
+ $dy2n, $dy2d,
+ $dx3n, $dx3d)
+ . "vhcurveto";
+ }
+ elsif (!$dy1n && !$dx3n) {
+ push @fontprog, frac_string ($dx1n, $dx1d,
+ $dx2n, $dx2d,
+ $dy2n, $dy2d,
+ $dy3n, $dy3d)
+ . "hvcurveto";
+ }
+ else {
+ push @fontprog, frac_string ($dx1n, $dx1d,
+ $dy1n, $dy1d,
+ $dx2n, $dx2d,
+ $dy2n, $dy2d,
+ $dx3n, $dx3d,
+ $dy3n, $dy3d)
+ . "rrcurveto";
+ }
+ next TOKENLOOP;
+ };
+
+ # lineto: Convert to vlineto, hlineto, or rlineto.
+ $token eq "lineto" && do {
+ my ($dx, $dy) = ($arglist[0] - $cpx,
+ $arglist[1] - $cpy);
+ my ($dxn, $dxd) = frac_approx ($dx);
+ my ($dyn, $dyd) = frac_approx ($dy);
+ $cpx += $dxn / $dxd;
+ $cpy += $dyn / $dyd;
+
+ if (!$dxn) {
+ push @fontprog, frac_string ($dyn, $dyd)
+ . "vlineto" if $dyn;
+ }
+ elsif (!$dyn) {
+ push @fontprog, frac_string ($dxn, $dxd)
+ . "hlineto";
+ }
+ else {
+ push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd)
+ . "rlineto";
+ }
+ next TOKENLOOP;
+ };
+
+ # moveto: Convert to vmoveto, hmoveto, or rmoveto.
+ $token eq "moveto" && do {
+ my ($dx, $dy) = ($arglist[0] - $cpx,
+ $arglist[1] - $cpy);
+ my ($dxn, $dxd) = frac_approx ($dx);
+ my ($dyn, $dyd) = frac_approx ($dy);
+ $cpx += $dxn / $dxd;
+ $cpy += $dyn / $dyd;
+
+ if (!$dxn) {
+ push @fontprog, frac_string ($dyn, $dyd)
+ . "vmoveto";
+ }
+ elsif (!$dyn) {
+ push @fontprog, frac_string ($dxn, $dxd)
+ . "hmoveto";
+ }
+ else {
+ push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd)
+ . "rmoveto";
+ }
+ next TOKENLOOP;
+ };
+
+ # closepath: Output as is.
+ $token eq "closepath" && do {
+ push @fontprog, $token;
+ next TOKENLOOP;
+ };
+ }
+ }
+ close PSFILE;
+ push @fontprog, ("endchar",
+ "} ND");
+ print OUTFILE join ("\n\t", @fontprog), "\n";
+ }
+}
+
+
+# Output the final set of code for the Type 1 font.
+sub output_trailer ()
+{
+    # The /.notdef glyph spans the full font width; the remaining lines
+    # close the dictionaries opened earlier and register the font.
+    print OUTFILE <<"ENDTRAILER";
+/.notdef {
+	0 @{[$fontbbox[2]-$fontbbox[0]]} hsbw
+	endchar
+	} ND
+end
+end
+readonly put
+noaccess put
+dup/FontName get exch definefont pop
+mark currentfile closefile
+cleartomark
+ENDTRAILER
+}
+
+######################################################################
+
+# Parse the command line. Asterisks in the following represents
+# commands also defined by Plain Metafont.
+my %opthash = ();
+GetOptions (\%opthash,
+ "fontversion=s", # font_version
+ "comment=s", # font_comment
+ "family=s", # font_family
+ "weight=s", # font_weight
+ "fullname=s", # font_identifier (*)
+ "fixedpitch!", # font_fixed_pitch
+ "italicangle=f", # font_slant (*)
+ "underpos=f", # font_underline_position
+ "underthick=f", # font_underline_thickness
+ "name=s", # font_name
+ "uniqueid=i", # font_unique_id
+ "designsize=f", # font_size (*)
+ "encoding=s", # font_coding_scheme (*)
+ "rounding=f",
+ "bpppix=f",
+ "ffscript=s",
+ "h|help",
+ "V|version") || pod2usage(2);
+if (defined $opthash{"h"}) {
+ pod2usage(-verbose => 1,
+ -output => \*STDOUT, # Bug workaround for Pod::Usage
+ -exitval => "NOEXIT");
+ print "Please e-mail bug reports to scott+mf\@pakin.org.\n";
+ exit 1;
+}
+do {print $versionmsg; exit 1} if defined $opthash{"V"};
+pod2usage(2) if $#ARGV != 0;
+
+# Extract the filename from the command line.
+$mffile = $ARGV[0];
+my @fileparts = fileparse $mffile, ".mf";
+$filebase = $fileparts[0];
+$filedir = $fileparts[1];
+$filenoext = File::Spec->catfile ($filedir, $filebase);
+$pt1file = $filebase . ".pt1";
+$pfbfile = $filebase . ".pfb";
+
+assign_default $bpppix, $opthash{bpppix}, 0.02;
+
+# Make our first pass through the input, to set values for various options.
+$mag = 100; # Get a more precise bounding box.
+get_bboxes(1); # This might set $designsize.
+
+# Sanity-check the specified precision.
+assign_default $rounding, $opthash{rounding}, 1;
+if ($rounding<=0.0 || $rounding>1.0) {
+ die sprintf "%s: Invalid rounding amount \"%g\"; value must be a positive number no greater than 1.0\n", $progname, $rounding;
+}
+
+# Ensure that every user-definable parameter is assigned a value.
+assign_default $fontversion, $opthash{fontversion}, "001.000";
+assign_default $creationdate, scalar localtime;
+assign_default $comment, $opthash{comment}, "Font converted to Type 1 by mf2pt1, written by Scott Pakin.";
+assign_default $weight, $opthash{weight}, "Medium";
+assign_default $fixedpitch, $opthash{fixedpitch}, 0;
+assign_default $uniqueID, $opthash{uniqueid}, int(rand(1000000)) + 4000000;
+assign_default $designsize, $opthash{designsize};
+die "${progname}: a design size must be specified in $mffile or on the command line\n" if !defined $designsize;
+die "${progname}: the design size must be a positive number\n" if $designsize<=0.0;
+assign_default $underlinepos, $opthash{underpos}, -1;
+$underlinepos = round(1000*$underlinepos/$designsize);
+assign_default $underlinethick, $opthash{underthick}, 0.5;
+$underlinethick = round(1000*$underlinethick/$designsize);
+assign_default $fullname, $opthash{fullname}, $filebase;
+assign_default $familyname, $opthash{family}, $fullname;
+assign_default $italicangle, $opthash{italicangle}, 0;
+assign_default $fontname, $opthash{name}, "$familyname-$weight";
+$fontname =~ s/\s//g;
+assign_default $encoding, $opthash{encoding}, "standard";
+my $encoding_name = $encoding;
+ENCODING:
+{
+ if (-e $encoding) {
+ # Filenames take precedence over built-in encodings.
+ my @enc_array;
+ open (ENCFILE, "<$encoding") || die "${progname}: $! ($encoding)\n";
+ while (my $oneline = <ENCFILE>) {
+ $oneline =~ s/\%.*$//;
+ foreach my $word (split " ", $oneline) {
+ push @enc_array, substr($word, 1) if substr($word, 0, 1) eq "/";
+ }
+ }
+ close ENCFILE;
+ $encoding_name = substr (shift @enc_array, 1);
+ $encoding = \@enc_array;
+ last ENCODING;
+ }
+ $encoding=\@standardencoding, last ENCODING if $encoding eq "standard";
+ $encoding=\@isolatin1encoding, last ENCODING if $encoding eq "isolatin1";
+ $encoding=\@ot1encoding, last ENCODING if $encoding eq "ot1";
+ $encoding=\@t1encoding, last ENCODING if $encoding eq "t1";
+ $encoding=\@glyphname, last ENCODING if $encoding eq "asis";
+ warn "${progname}: Unknown encoding \"$encoding\"; using standard Adobe encoding\n";
+ $encoding=\@standardencoding; # Default to standard encoding
+}
+assign_default $fixedpitch, $opthash{fixedpitch}, 0;
+$fixedpitch = $fixedpitch ? "true" : "false";
+assign_default $ffscript, $opthash{ffscript};
+
+# Output the final values of all of our parameters.
+print "\n";
+print <<"PARAMVALUES";
+mf2pt1 is using the following font parameters:
+ font_version: $fontversion
+ font_comment: $comment
+ font_family: $familyname
+ font_weight: $weight
+ font_identifier: $fullname
+ font_fixed_pitch: $fixedpitch
+ font_slant: $italicangle
+ font_underline_position: $underlinepos
+ font_underline_thickness: $underlinethick
+ font_name: $fontname
+ font_unique_id: $uniqueID
+ font_size: $designsize (bp)
+ font_coding_scheme: $encoding_name
+PARAMVALUES
+ ;
+print "\n";
+
+# Scale by a factor of 1000/design size.
+$mag = 1000.0 / $designsize;
+get_bboxes(0);
+print "\n";
+
+# Output the font in disassembled format.
+open (OUTFILE, ">$pt1file") || die "${progname}: $! ($pt1file)\n";
+output_header();
+printf OUTFILE "2 index /CharStrings %d dict dup begin\n",
+ 1+scalar(grep {defined($_)} @charbbox);
+output_font_programs();
+output_trailer();
+close OUTFILE;
+unlink @charfiles;
+print "\n";
+
+# Convert from the disassembled font format to Type 1 binary format.
+if (!execute_command 0, ("t1asm", $pt1file, $pfbfile)) {
+ die "${progname}: You'll need either to install t1utils and rerun $progname or find another way to convert $pt1file to $pfbfile\n";
+ exit 1;
+}
+print "\n";
+unlink $pt1file;
+
+# Use FontForge to autohint the result.
+my $user_script = 0; # 1=script file was provided by the user; 0=created here
+if (defined $ffscript) {
+ # The user provided his own script.
+ $user_script = 1;
+}
+else {
+ # Create a FontForge script file.
+ $ffscript = $filebase . ".pe";
+ open (FFSCRIPT, ">$ffscript") || die "${progname}: $! ($ffscript)\n";
+ print FFSCRIPT <<'AUTOHINT';
+Open($1);
+SelectAll();
+RemoveOverlap();
+AddExtrema();
+Simplify(0, 2);
+CorrectDirection();
+Simplify(0, 2);
+RoundToInt();
+AutoHint();
+Generate($1);
+Quit(0);
+AUTOHINT
+ ;
+ close FFSCRIPT;
+}
+if (!execute_command 0, ("fontforge", "-script", $ffscript, $pfbfile)) {
+ warn "${progname}: You'll need to install FontForge if you want $pfbfile autohinted (not required, but strongly recommended)\n";
+}
+unlink $ffscript if !$user_script;
+print "\n";
+
+# Finish up.
+print "*** Successfully generated $pfbfile! ***\n";
+exit 0;
+
+######################################################################
+
+__END__
+
+=head1 NAME
+
+mf2pt1 - produce a PostScript Type 1 font program from a Metafont source
+
+
+=head1 SYNOPSIS
+
+mf2pt1
+[B<--help>]
+[B<--version>]
+[B<--comment>=I<string>]
+[B<--designsize>=I<number>]
+[B<--encoding>=I<encoding>]
+[B<--family>=I<name>]
+[B<-->[B<no>]B<fixedpitch>]
+[B<--fontversion>=I<MMM.mmm>]
+[B<--fullname>=I<name>]
+[B<--italicangle>=I<number>]
+[B<--name>=I<name>]
+[B<--underpos>=I<number>]
+[B<--underthick>=I<number>]
+[B<--uniqueid>=I<number>]
+[B<--weight>=I<weight>]
+[B<--rounding>=I<number>]
+[B<--bpppix>=I<number>]
+[B<--ffscript>=I<file.pe>]
+I<infile>.mf
+
+
+=head1 WARNING
+
+The B<mf2pt1> Info file is the main source of documentation for
+B<mf2pt1>. This man page is merely a brief summary.
+
+
+=head1 DESCRIPTION
+
+B<mf2pt1> facilitates producing PostScript Type 1 fonts from a
+Metafont source file. It is I<not>, as the name may imply, an
+automatic converter of arbitrary Metafont fonts to Type 1 format.
+B<mf2pt1> imposes a number of restrictions on the Metafont input. If
+these restrictions are met, B<mf2pt1> will produce valid Type 1
+output. (Actually, it produces "disassembled" Type 1; the B<t1asm>
+program from the B<t1utils> suite will convert this to a true Type 1
+font.)
+
+=head2 Usage
+
+ mf2pt1 myfont.mf
+
+=head1 OPTIONS
+
+Font parameters are best specified within a Metafont program. If
+necessary, though, command-line options can override any of these
+parameters. The B<mf2pt1> Info page, the primary source of B<mf2pt1>
+documentation, describes the following in greater detail.
+
+=over 4
+
+=item B<--help>
+
+Provide help on B<mf2pt1>'s command-line options.
+
+=item B<--version>
+
+Output the B<mf2pt1> version number, copyright, and license.
+
+=item B<--comment>=I<string>
+
+Include a font comment, usually a copyright notice.
+
+=item B<--designsize>=I<number>
+
+Specify the font design size in points.
+
+=item B<--encoding>=I<encoding>
+
+Designate the font encoding, either the name of a---typically
+F<.enc>---file which contains a PostScript font-encoding vector or one
+of C<standard> (the default), C<ot1>, C<t1>, or C<isolatin1>.
+
+=item B<--family>=I<name>
+
+Specify the font family.
+
+=item B<--fixedpitch>, B<--nofixedpitch>
+
+Assert that the font uses either monospaced (B<--fixedpitch>) or
+proportional (B<--nofixedpitch>) character widths.
+
+=item B<--fontversion>=I<MMM.mmm>
+
+Specify the font's major and minor version number.
+
+=item B<--fullname>=I<name>
+
+Designate the full font name (family plus modifiers).
+
+=item B<--italicangle>=I<number>
+
+Designate the italic angle in degrees counterclockwise from vertical.
+
+=item B<--name>=I<name>
+
+Provide the font name.
+
+=item B<--underpos>=I<number>
+
+Specify the vertical position of the underline in thousandths of the
+font height.
+
+=item B<--underthick>=I<number>
+
+Specify the thickness of the underline in thousandths of the font
+height.
+
+=item B<--uniqueid>=I<number>
+
+Specify a globally unique font identifier.
+
+=item B<--weight>=I<weight>
+
+Provide a description of the font weight (e.g., ``Heavy'').
+
+=item B<--rounding>=I<number>
+
+Specify the fraction of a font unit (0.0 < I<number> <= 1.0) to which
+to round coordinate values [default: 1.0].
+
+=item B<--bpppix>=I<number>
+
+Redefine the number of big points per pixel from 0.02 to I<number>.
+
+=item B<--ffscript>=I<file.pe>
+
+Name a script to pass to FontForge.
+
+=back
+
+
+=head1 FILES
+
+F<mf2pt1.mem> (which is generated from F<mf2pt1.mp> and F<mfplain.mp>)
+
+
+=head1 NOTES
+
+As stated in L</"WARNING">, the complete source of documentation for
+B<mf2pt1> is the Info page, not this man page.
+
+
+=head1 SEE ALSO
+
+mf(1), mpost(1), t1asm(1), fontforge(1)
+
+
+=head1 AUTHOR
+
+Scott Pakin, I<scott+mf@pakin.org>
--- /dev/null
+#!@PYTHON@
+# mutopia-index.py
+
+import fnmatch
+import getopt
+import os
+import re
+import stat
+import sys
+
+def find (pat, dir):
+ f = os.popen ('find %s -name "%s"'% (dir, pat))
+ lst = []
+ for a in f.readlines():
+ a = a[:-1]
+ lst.append (a)
+ return lst
+
+
+junk_prefix = 'out-www/'
+
+headertext= r"""
+
+<h1>LilyPond samples</h1>
+
+
+<p>You are looking at a page with some LilyPond samples. These files
+are also included in the distribution. The output is completely
+generated from the source file, without any further touch up.
+
+<p>
+
+The pictures are 90 dpi anti-aliased snapshots of the printed output.
+For a good impression of the quality print out the PDF file.
+"""
+
+headertext_nopics= r"""
+<p>No examples were found in this directory.
+"""
+
+#
+# FIXME breaks on multiple strings.
+#
+def read_lilypond_header (fn):
+ s = open (fn).read ()
+ s = re.sub ('%.*$', '', s)
+ s = re.sub ('\n', ' ', s)
+
+ dict = {}
+ m = re.search (r"""\\header\s*{([^}]*)}""", s)
+
+ if m:
+ s = m.group (1)
+ else:
+ return dict
+
+ while s:
+ m = re.search (r'''\s*(\S+)\s*=\s*"([^"]+)"''', s)
+ if m == None:
+ s = ''
+ else:
+ s = s[m.end (0):]
+ left = m.group (1)
+ right = m.group (2)
+
+ left = re.sub ('"', '', left)
+ right = re.sub ('"', '', right)
+ dict[left] = right
+
+ return dict
+
+def help ():
+ sys.stdout.write (r'''Usage: mutopia-index [OPTIONS] INFILE OUTFILE
+Generate index for mutopia.
+
+Options:
+ -h, --help print this help
+ -o, --output=FILE write output to file
+ -s, --subdirs=DIR add subdir
+ --suffix=SUF specify suffix
+
+''')
+ sys.exit (0)
+
+# ugh.
+def gen_list (inputs, file_name):
+ sys.stderr.write ("generating HTML list %s" % file_name)
+ sys.stderr.write ('\n')
+ if file_name:
+ list = open (file_name, 'w')
+ else:
+ list = sys.stdout
+ list.write ('''<html><head><title>Rendered Examples</title>
+<style type="text/css">
+hr { border:0; height:1px; color: #000000; background-color: #000000; }\n
+</style>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+</head>''')
+
+ list.write ('<body bgcolor=white>\n')
+
+ if inputs:
+ list.write (headertext)
+ else:
+ list.write (headertext_nopics)
+
+ for ex in inputs:
+ print ex
+
+ (base, ext) = os.path.splitext (ex)
+ (base, ext2) = os.path.splitext (base)
+ ext = ext2 + ext
+
+ header = read_lilypond_header (ex)
+ head = header.get ('title', os.path.basename (base))
+ composer = header.get ('composer', '')
+ desc = header.get ('description', '')
+ list.write ('<hr>\n')
+ list.write ('<h1>%s</h1>\n' % head);
+ if composer:
+ list.write ('<h2>%s</h2>\n' % composer)
+ if desc:
+ list.write ('%s<p>' % desc)
+ list.write ('<ul>\n')
+
+ def list_item (file_name, desc, type, lst = list):
+ if os.path.isfile (file_name):
+ lst.write ('<li><a href="%s">%s</a>'
+ % (re.sub (junk_prefix, '', file_name), desc))
+
+ # FIXME: include warning if it uses \include
+ # files.
+
+ size = os.stat (file_name)[stat.ST_SIZE]
+ kB = (size + 512) / 1024
+ if kB:
+ lst.write (' (%s %d kB)' % (type, kB))
+ else:
+ lst.write (' (%s %d characters)'
+ % (type, size))
+ pictures = ['jpeg', 'png', 'xpm']
+ lst.write ('\n')
+ else:
+ print "cannot find" , `file_name`
+
+ list_item (base + ext, 'The input', 'ASCII')
+
+ pages_found = 0
+ for page in range (1, 100):
+ f = base + '-page%d.png' % page
+
+ if not os.path.isfile (f):
+ break
+ pages_found += 1
+ list_item (f, 'See a picture of page %d' % page, 'png')
+
+ if pages_found == 0 and os.path.exists (base + '.png'):
+ list_item (base + ".png",
+ 'See a picture', 'png')
+
+
+ list_item (base + '.pdf', 'Print', 'PDF')
+ list_item (base + '.midi', 'Listen', 'MIDI')
+ list.write ('</ul>\n');
+
+ list.write ('</body></html>\n');
+ list.close ()
+
+(options, files) = getopt.getopt (sys.argv[1:],
+                  'ho:s:', ['help', 'output=', 'subdirs=', 'suffix='])
+outfile = 'examples.html'
+
+subdirs = []
+for (o, a) in options:
+ if o == '--help' or o == '-h':
+ help ()
+ elif o == '--output' or o == '-o':
+ outfile = a
+
+dirs = []
+for f in files:
+ dirs += find ('out-www', f)
+
+if not dirs:
+ dirs = ['.']
+
+allfiles = []
+
+for d in dirs:
+ allfiles += find ('*.ly', d)
+
+allfiles = [f for f in allfiles
+ if not f.endswith ('snippet-map.ly')
+ and not re.search ('lily-[0-9a-f]+', f)
+ and 'musicxml' not in f]
+
+gen_list (allfiles, outfile)
--- /dev/null
+#!@PYTHON@
+import sys
+import optparse
+import os
+import math
+
+## so we can call directly as scripts/build/output-distance.py
+me_path = os.path.abspath (os.path.split (sys.argv[0])[0])
+sys.path.insert (0, me_path + '/../python/')
+sys.path.insert (0, me_path + '/../python/out/')
+
+
+X_AXIS = 0
+Y_AXIS = 1
+INFTY = 1e6
+
+OUTPUT_EXPRESSION_PENALTY = 1
+ORPHAN_GROB_PENALTY = 1
+options = None
+
+################################################################
+# system interface.
+temp_dir = None
+class TempDirectory:
+ def __init__ (self):
+ import tempfile
+ self.dir = tempfile.mkdtemp ()
+ print 'dir is', self.dir
+ def __del__ (self):
+ print 'rm -rf %s' % self.dir
+ os.system ('rm -rf %s' % self.dir)
+ def __call__ (self):
+ return self.dir
+
+
+def get_temp_dir ():
+ global temp_dir
+ if not temp_dir:
+ temp_dir = TempDirectory ()
+ return temp_dir ()
+
+def read_pipe (c):
+ print 'pipe' , c
+ return os.popen (c).read ()
+
+def system (c):
+ print 'system' , c
+ s = os.system (c)
+ if s :
+ raise Exception ("failed")
+ return
+
+def shorten_string (s):
+ threshold = 15
+ if len (s) > 2*threshold:
+ s = s[:threshold] + '..' + s[-threshold:]
+ return s
+
+def max_distance (x1, x2):
+ dist = 0.0
+
+ for (p,q) in zip (x1, x2):
+ dist = max (abs (p-q), dist)
+
+ return dist
+
+
+def compare_png_images (old, new, dest_dir):
+ def png_dims (f):
+ m = re.search ('([0-9]+) x ([0-9]+)', read_pipe ('file %s' % f))
+
+ return tuple (map (int, m.groups ()))
+
+ dest = os.path.join (dest_dir, new.replace ('.png', '.compare.jpeg'))
+ try:
+ dims1 = png_dims (old)
+ dims2 = png_dims (new)
+ except AttributeError:
+ ## hmmm. what to do?
+ system ('touch %(dest)s' % locals ())
+ return
+
+ dims = (min (dims1[0], dims2[0]),
+ min (dims1[1], dims2[1]))
+
+ dir = get_temp_dir ()
+ system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop1.png' % (dims + (old, dir)))
+ system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop2.png' % (dims + (new, dir)))
+
+ system ('compare -depth 8 %(dir)s/crop1.png %(dir)s/crop2.png %(dir)s/diff.png' % locals ())
+
+ system ("convert -depth 8 %(dir)s/diff.png -blur 0x3 -negate -channel alpha,blue -type TrueColorMatte -fx 'intensity' %(dir)s/matte.png" % locals ())
+
+ system ("composite -compose atop -quality 65 %(dir)s/matte.png %(new)s %(dest)s" % locals ())
+
+
+################################################################
+# interval/bbox arithmetic.
+
+empty_interval = (INFTY, -INFTY)
+empty_bbox = (empty_interval, empty_interval)
+
+def interval_is_empty (i):
+ return i[0] > i[1]
+
+def interval_length (i):
+ return max (i[1]-i[0], 0)
+
+def interval_union (i1, i2):
+ return (min (i1[0], i2[0]),
+ max (i1[1], i2[1]))
+
+def interval_intersect (i1, i2):
+ return (max (i1[0], i2[0]),
+ min (i1[1], i2[1]))
+
+def bbox_is_empty (b):
+ return (interval_is_empty (b[0])
+ or interval_is_empty (b[1]))
+
+def bbox_union (b1, b2):
+ return (interval_union (b1[X_AXIS], b2[X_AXIS]),
+            interval_union (b1[Y_AXIS], b2[Y_AXIS]))
+
+def bbox_intersection (b1, b2):
+ return (interval_intersect (b1[X_AXIS], b2[X_AXIS]),
+            interval_intersect (b1[Y_AXIS], b2[Y_AXIS]))
+
+def bbox_area (b):
+ return interval_length (b[X_AXIS]) * interval_length (b[Y_AXIS])
+
+def bbox_diameter (b):
+ return max (interval_length (b[X_AXIS]),
+ interval_length (b[Y_AXIS]))
+
+
+def difference_area (a, b):
+ return bbox_area (a) - bbox_area (bbox_intersection (a,b))
+
+class GrobSignature:
+ def __init__ (self, exp_list):
+ (self.name, self.origin, bbox_x,
+ bbox_y, self.output_expression) = tuple (exp_list)
+
+ self.bbox = (bbox_x, bbox_y)
+ self.centroid = (bbox_x[0] + bbox_x[1], bbox_y[0] + bbox_y[1])
+
+ def __repr__ (self):
+ return '%s: (%.2f,%.2f), (%.2f,%.2f)\n' % (self.name,
+ self.bbox[0][0],
+ self.bbox[0][1],
+ self.bbox[1][0],
+ self.bbox[1][1])
+
+ def axis_centroid (self, axis):
+ return apply (sum, self.bbox[axis]) / 2
+
+ def centroid_distance (self, other, scale):
+ return max_distance (self.centroid, other.centroid) / scale
+
+ def bbox_distance (self, other):
+ divisor = bbox_area (self.bbox) + bbox_area (other.bbox)
+
+ if divisor:
+ return (difference_area (self.bbox, other.bbox) +
+ difference_area (other.bbox, self.bbox)) / divisor
+ else:
+ return 0.0
+
+ def expression_distance (self, other):
+ if self.output_expression == other.output_expression:
+ return 0
+ else:
+ return 1
+
+################################################################
+# single System.
+
+class SystemSignature:
+ def __init__ (self, grob_sigs):
+ d = {}
+ for g in grob_sigs:
+ val = d.setdefault (g.name, [])
+ val += [g]
+
+ self.grob_dict = d
+ self.set_all_bbox (grob_sigs)
+
+ def set_all_bbox (self, grobs):
+ self.bbox = empty_bbox
+ for g in grobs:
+ self.bbox = bbox_union (g.bbox, self.bbox)
+
+ def closest (self, grob_name, centroid):
+ min_d = INFTY
+ min_g = None
+ try:
+ grobs = self.grob_dict[grob_name]
+
+ for g in grobs:
+ d = max_distance (g.centroid, centroid)
+ if d < min_d:
+ min_d = d
+ min_g = g
+
+
+ return min_g
+
+ except KeyError:
+ return None
+ def grobs (self):
+ return reduce (lambda x,y: x+y, self.grob_dict.values(), [])
+
+################################################################
+## comparison of systems.
+
+class SystemLink:
+ def __init__ (self, system1, system2):
+ self.system1 = system1
+ self.system2 = system2
+
+ self.link_list_dict = {}
+ self.back_link_dict = {}
+
+
+ ## pairs
+ self.orphans = []
+
+ ## pair -> distance
+ self.geo_distances = {}
+
+ ## pairs
+ self.expression_changed = []
+
+ self._geometric_distance = None
+ self._expression_change_count = None
+ self._orphan_count = None
+
+ for g in system1.grobs ():
+
+ ## skip empty bboxes.
+ if bbox_is_empty (g.bbox):
+ continue
+
+ closest = system2.closest (g.name, g.centroid)
+
+ self.link_list_dict.setdefault (closest, [])
+ self.link_list_dict[closest].append (g)
+ self.back_link_dict[g] = closest
+
+
+ def calc_geometric_distance (self):
+ total = 0.0
+ for (g1,g2) in self.back_link_dict.items ():
+ if g2:
+ d = g1.bbox_distance (g2)
+ if d:
+ self.geo_distances[(g1,g2)] = d
+
+ total += d
+
+ self._geometric_distance = total
+
+ def calc_orphan_count (self):
+ count = 0
+ for (g1, g2) in self.back_link_dict.items ():
+ if g2 == None:
+ self.orphans.append ((g1, None))
+
+ count += 1
+
+ self._orphan_count = count
+
+ def calc_output_exp_distance (self):
+ d = 0
+ for (g1,g2) in self.back_link_dict.items ():
+ if g2:
+ d += g1.expression_distance (g2)
+
+ self._expression_change_count = d
+
+ def output_expression_details_string (self):
+ return ', '.join ([g1.name for g1 in self.expression_changed])
+
+ def geo_details_string (self):
+ results = [(d, g1,g2) for ((g1, g2), d) in self.geo_distances.items()]
+ results.sort ()
+ results.reverse ()
+
+ return ', '.join (['%s: %f' % (g1.name, d) for (d, g1, g2) in results])
+
+ def orphan_details_string (self):
+ return ', '.join (['%s-None' % g1.name for (g1,g2) in self.orphans if g2==None])
+
+ def geometric_distance (self):
+ if self._geometric_distance == None:
+ self.calc_geometric_distance ()
+ return self._geometric_distance
+
+ def orphan_count (self):
+ if self._orphan_count == None:
+ self.calc_orphan_count ()
+
+ return self._orphan_count
+
+ def output_expression_change_count (self):
+ if self._expression_change_count == None:
+ self.calc_output_exp_distance ()
+ return self._expression_change_count
+
+ def distance (self):
+ return (self.output_expression_change_count (),
+ self.orphan_count (),
+ self.geometric_distance ())
+
+def read_signature_file (name):
+ print 'reading', name
+
+ entries = open (name).read ().split ('\n')
+ def string_to_tup (s):
+ return tuple (map (float, s.split (' ')))
+
+ def string_to_entry (s):
+ fields = s.split('@')
+ fields[2] = string_to_tup (fields[2])
+ fields[3] = string_to_tup (fields[3])
+
+ return tuple (fields)
+
+ entries = [string_to_entry (e) for e in entries
+ if e and not e.startswith ('#')]
+
+ grob_sigs = [GrobSignature (e) for e in entries]
+ sig = SystemSignature (grob_sigs)
+ return sig
+
+
+################################################################
+# different systems of a .ly file.
+
+hash_to_original_name = {}
+
+class FileLink:
+ def __init__ (self, f1, f2):
+ self._distance = None
+ self.file_names = (f1, f2)
+
+ def text_record_string (self):
+ return '%-30f %-20s\n' % (self.distance (),
+ self.name ()
+ + os.path.splitext (self.file_names[1])[1]
+ )
+
+ def calc_distance (self):
+ return 0.0
+
+ def distance (self):
+ if self._distance == None:
+ self._distance = self.calc_distance ()
+
+ return self._distance
+
+ def source_file (self):
+ for ext in ('.ly', '.ly.txt'):
+ base = os.path.splitext (self.file_names[1])[0]
+ f = base + ext
+ if os.path.exists (f):
+ return f
+
+ return ''
+
+ def name (self):
+ base = os.path.basename (self.file_names[1])
+ base = os.path.splitext (base)[0]
+ base = hash_to_original_name.get (base, base)
+ base = os.path.splitext (base)[0]
+ return base
+
+ def extension (self):
+ return os.path.splitext (self.file_names[1])[1]
+
+ def link_files_for_html (self, dest_dir):
+ for f in self.file_names:
+ link_file (f, os.path.join (dest_dir, f))
+
+ def get_distance_details (self):
+ return ''
+
+ def get_cell (self, oldnew):
+ return ''
+
+ def get_file (self, oldnew):
+ return self.file_names[oldnew]
+
+ def html_record_string (self, dest_dir):
+ dist = self.distance()
+
+ details = self.get_distance_details ()
+ if details:
+ details_base = os.path.splitext (self.file_names[1])[0]
+ details_base += '.details.html'
+ fn = dest_dir + '/' + details_base
+ open_write_file (fn).write (details)
+
+ details = '<br>(<a href="%(details_base)s">details</a>)' % locals ()
+
+ cell1 = self.get_cell (0)
+ cell2 = self.get_cell (1)
+
+ name = self.name () + self.extension ()
+ file1 = self.get_file (0)
+ file2 = self.get_file (1)
+
+ return '''<tr>
+<td>
+%(dist)f
+%(details)s
+</td>
+<td>%(cell1)s<br><font size=-2><a href="%(file1)s"><tt>%(name)s</tt></font></td>
+<td>%(cell2)s<br><font size=-2><a href="%(file2)s"><tt>%(name)s</tt></font></td>
+</tr>''' % locals ()
+
+
+class FileCompareLink (FileLink):
+ def __init__ (self, f1, f2):
+ FileLink.__init__ (self, f1, f2)
+ self.contents = (self.get_content (self.file_names[0]),
+ self.get_content (self.file_names[1]))
+
+
+ def calc_distance (self):
+ ## todo: could use import MIDI to pinpoint
+ ## what & where changed.
+
+ if self.contents[0] == self.contents[1]:
+ return 0.0
+ else:
+ return 100.0;
+
+ def get_content (self, f):
+ print 'reading', f
+ s = open (f).read ()
+ return s
+
+
+class GitFileCompareLink (FileCompareLink):
+ def get_cell (self, oldnew):
+ str = self.contents[oldnew]
+
+ # truncate long lines
+ str = '\n'.join ([l[:80] for l in str.split ('\n')])
+
+
+ str = '<font size="-2"><pre>%s</pre></font>' % str
+ return str
+
+ def calc_distance (self):
+ if self.contents[0] == self.contents[1]:
+ d = 0.0
+ else:
+ d = 1.0001 *options.threshold
+
+ return d
+
+
+class TextFileCompareLink (FileCompareLink):
+ def calc_distance (self):
+ import difflib
+ diff = difflib.unified_diff (self.contents[0].strip().split ('\n'),
+ self.contents[1].strip().split ('\n'),
+ fromfiledate = self.file_names[0],
+ tofiledate = self.file_names[1]
+ )
+
+ self.diff_lines = [l for l in diff]
+ self.diff_lines = self.diff_lines[2:]
+
+ return math.sqrt (float (len ([l for l in self.diff_lines if l[0] in '-+'])))
+
+ def get_cell (self, oldnew):
+ str = ''
+ if oldnew == 1:
+ str = '\n'.join ([d.replace ('\n','') for d in self.diff_lines])
+ str = '<font size="-2"><pre>%s</pre></font>' % str
+ return str
+
+class LogFileCompareLink (TextFileCompareLink):
+ def get_content (self, f):
+ c = TextFileCompareLink.get_content (self, f)
+ c = re.sub ("\nProcessing `[^\n]+'\n", '', c)
+ return c
+
+class ProfileFileLink (FileCompareLink):
+ def __init__ (self, f1, f2):
+ FileCompareLink.__init__ (self, f1, f2)
+ self.results = [{}, {}]
+
+ def get_cell (self, oldnew):
+ str = ''
+ for k in ('time', 'cells'):
+ if oldnew==0:
+ str += '%-8s: %d\n' % (k, int (self.results[oldnew][k]))
+ else:
+ str += '%-8s: %8d (%5.3f)\n' % (k, int (self.results[oldnew][k]),
+ self.get_ratio (k))
+
+ return '<pre>%s</pre>' % str
+
+ def get_ratio (self, key):
+ (v1,v2) = (self.results[0].get (key, -1),
+ self.results[1].get (key, -1))
+
+ if v1 <= 0 or v2 <= 0:
+ return 0.0
+
+ return (v1 - v2) / float (v1+v2)
+
+ def calc_distance (self):
+ for oldnew in (0,1):
+ def note_info (m):
+ self.results[oldnew][m.group(1)] = float (m.group (2))
+
+ re.sub ('([a-z]+): ([-0-9.]+)\n',
+ note_info, self.contents[oldnew])
+
+ dist = 0.0
+ factor = {
+ 'time': 0.1,
+ 'cells': 5.0,
+ }
+
+ for k in ('time', 'cells'):
+ real_val = math.tan (self.get_ratio (k) * 0.5 * math.pi)
+ dist += math.exp (math.fabs (real_val) * factor[k]) - 1
+
+ dist = min (dist, 100)
+ return dist
+
+
+class MidiFileLink (TextFileCompareLink):
+ def get_content (self, oldnew):
+ import midi
+
+ data = FileCompareLink.get_content (self, oldnew)
+ midi = midi.parse (data)
+ tracks = midi[1]
+
+ str = ''
+ j = 0
+ for t in tracks:
+ str += 'track %d' % j
+ j += 1
+
+ for e in t:
+ ev_str = repr (e)
+ if re.search ('LilyPond [0-9.]+', ev_str):
+ continue
+
+ str += ' ev %s\n' % `e`
+ return str
+
+
+
+class SignatureFileLink (FileLink):
+ def __init__ (self, f1, f2 ):
+ FileLink.__init__ (self, f1, f2)
+ self.system_links = {}
+
+ def add_system_link (self, link, number):
+ self.system_links[number] = link
+
+ def calc_distance (self):
+ d = 0.0
+
+ orphan_distance = 0.0
+ for l in self.system_links.values ():
+ d = max (d, l.geometric_distance ())
+ orphan_distance += l.orphan_count ()
+
+ return d + orphan_distance
+
+ def add_file_compare (self, f1, f2):
+ system_index = []
+
+ def note_system_index (m):
+ system_index.append (int (m.group (1)))
+ return ''
+
+ base1 = re.sub ("-([0-9]+).signature", note_system_index, f1)
+ base2 = re.sub ("-([0-9]+).signature", note_system_index, f2)
+
+ self.base_names = (os.path.normpath (base1),
+ os.path.normpath (base2))
+
+ s1 = read_signature_file (f1)
+ s2 = read_signature_file (f2)
+
+ link = SystemLink (s1, s2)
+
+ self.add_system_link (link, system_index[0])
+
+
+ def create_images (self, dest_dir):
+
+ files_created = [[], []]
+ for oldnew in (0, 1):
+ pat = self.base_names[oldnew] + '.eps'
+
+ for f in glob.glob (pat):
+ infile = f
+ outfile = (dest_dir + '/' + f).replace ('.eps', '.png')
+ data_option = ''
+ if options.local_data_dir:
+ data_option = ('-slilypond-datadir=%s/../share/lilypond/current '
+ % os.path.dirname(infile))
+
+ mkdir (os.path.split (outfile)[0])
+ cmd = ('gs -sDEVICE=png16m -dGraphicsAlphaBits=4 -dTextAlphaBits=4 '
+ ' %(data_option)s '
+ ' -r101 '
+ ' -sOutputFile=%(outfile)s -dNOSAFER -dEPSCrop -q -dNOPAUSE '
+ ' %(infile)s -c quit ') % locals ()
+
+ files_created[oldnew].append (outfile)
+ system (cmd)
+
+ return files_created
+
+ def link_files_for_html (self, dest_dir):
+ FileLink.link_files_for_html (self, dest_dir)
+ to_compare = [[], []]
+
+ exts = []
+ if options.create_images:
+ to_compare = self.create_images (dest_dir)
+ else:
+ exts += ['.png', '-page*png']
+
+ for ext in exts:
+ for oldnew in (0,1):
+ for f in glob.glob (self.base_names[oldnew] + ext):
+ dst = dest_dir + '/' + f
+ link_file (f, dst)
+
+ if f.endswith ('.png'):
+ to_compare[oldnew].append (f)
+
+ if options.compare_images:
+ for (old, new) in zip (to_compare[0], to_compare[1]):
+ compare_png_images (old, new, dest_dir)
+
+
+ def get_cell (self, oldnew):
+ def img_cell (ly, img, name):
+ if not name:
+ name = 'source'
+ else:
+ name = '<tt>%s</tt>' % name
+
+ return '''
+<a href="%(img)s">
+<img src="%(img)s" style="border-style: none; max-width: 500px;">
+</a><br>
+''' % locals ()
+ def multi_img_cell (ly, imgs, name):
+ if not name:
+ name = 'source'
+ else:
+ name = '<tt>%s</tt>' % name
+
+ imgs_str = '\n'.join (['''<a href="%s">
+<img src="%s" style="border-style: none; max-width: 500px;">
+</a><br>''' % (img, img)
+ for img in imgs])
+
+
+ return '''
+%(imgs_str)s
+''' % locals ()
+
+
+
+ def cell (base, name):
+ pat = base + '-page*.png'
+ pages = glob.glob (pat)
+
+ if pages:
+ return multi_img_cell (base + '.ly', sorted (pages), name)
+ else:
+ return img_cell (base + '.ly', base + '.png', name)
+
+
+
+ str = cell (os.path.splitext (self.file_names[oldnew])[0], self.name ())
+ if options.compare_images and oldnew == 1:
+ str = str.replace ('.png', '.compare.jpeg')
+
+ return str
+
+
+ def get_distance_details (self):
+ systems = self.system_links.items ()
+ systems.sort ()
+
+ html = ""
+ for (c, link) in systems:
+ e = '<td>%d</td>' % c
+ for d in link.distance ():
+ e += '<td>%f</td>' % d
+
+ e = '<tr>%s</tr>' % e
+
+ html += e
+
+ e = '<td>%d</td>' % c
+ for s in (link.output_expression_details_string (),
+ link.orphan_details_string (),
+ link.geo_details_string ()):
+ e += "<td>%s</td>" % s
+
+
+ e = '<tr>%s</tr>' % e
+ html += e
+
+ original = self.name ()
+ html = '''<html>
+<head>
+<title>comparison details for %(original)s</title>
+</head>
+<body>
+<table border=1>
+<tr>
+<th>system</th>
+<th>output</th>
+<th>orphan</th>
+<th>geo</th>
+</tr>
+
+%(html)s
+</table>
+
+</body>
+</html>
+''' % locals ()
+ return html
+
+
+################################################################
+# Files/directories
+
+import glob
+import re
+
+def compare_signature_files (f1, f2):
+ s1 = read_signature_file (f1)
+ s2 = read_signature_file (f2)
+
+ return SystemLink (s1, s2).distance ()
+
+def paired_files (dir1, dir2, pattern):
+ """
+ Search DIR1 and DIR2 for PATTERN.
+
+ Return (PAIRED, MISSING-FROM-2, MISSING-FROM-1)
+
+ """
+
+ files = []
+ for d in (dir1,dir2):
+ found = [os.path.split (f)[1] for f in glob.glob (d + '/' + pattern)]
+ found = dict ((f, 1) for f in found)
+ files.append (found)
+
+ pairs = []
+ missing = []
+ for f in files[0]:
+ try:
+ files[1].pop (f)
+ pairs.append (f)
+ except KeyError:
+ missing.append (f)
+
+ return (pairs, files[1].keys (), missing)
+
+class ComparisonData:
+    """Collect per-file comparison links for two output trees and
+    produce text/HTML reports from them."""
+    def __init__ (self):
+        self.result_dict = {}
+        self.missing = []      # (dir, name): present in old tree only
+        self.added = []        # (dir, name): present in new tree only
+        self.file_links = {}   # base name -> *FileLink comparison object
+
+    def read_sources (self):
+        # Scan each link's .ly source for \sourcefilename, recording the
+        # original name in the global hash_to_original_name map.
+
+        ## ugh: drop the .ly.txt
+        for (key, val) in self.file_links.items ():
+
+            def note_original (match, ln=val):
+                # ln=val binds the current link at definition time
+                key = ln.name ()
+                hash_to_original_name[key] = match.group (1)
+                return ''
+
+            sf = val.source_file ()
+            if sf:
+                re.sub (r'\\sourcefilename "([^"]+)"',
+                        note_original, open (sf).read ())
+            else:
+                print 'no source for', val
+
+    def compare_trees (self, dir1, dir2):
+        """Recursively compare DIR1 against DIR2; symlinked dirs skipped."""
+        self.compare_directories (dir1, dir2)
+
+        # only the first level of os.walk () is consumed; recursion is explicit
+        (root, dirs, files) = os.walk (dir1).next ()
+        for d in dirs:
+            d1 = os.path.join (dir1, d)
+            d2 = os.path.join (dir2, d)
+
+            if os.path.islink (d1) or os.path.islink (d2):
+                continue
+
+            if os.path.isdir (d2):
+                self.compare_trees (d1, d2)
+
+    def compare_directories (self, dir1, dir2):
+        # compare every supported result-file type present in both dirs
+        for ext in ['signature',
+                    'midi',
+                    'log',
+                    'profile',
+                    'gittxt']:
+            (paired, m1, m2) = paired_files (dir1, dir2, '*.' + ext)
+
+            self.missing += [(dir1, m) for m in m1]
+            self.added += [(dir2, m) for m in m2]
+
+            for p in paired:
+                if (options.max_count
+                    and len (self.file_links) > options.max_count):
+                    # honour --max-count: stop creating new comparisons
+                    continue
+
+                f2 = dir2 + '/' + p
+                f1 = dir1 + '/' + p
+                self.compare_files (f1, f2)
+
+    def compare_files (self, f1, f2):
+        """Dispatch one file pair to the comparison class for its type."""
+        if f1.endswith ('signature'):
+            self.compare_signature_files (f1, f2)
+        else:
+            ext = os.path.splitext (f1)[1]
+            klasses = {
+                '.midi': MidiFileLink,
+                '.log' : LogFileCompareLink,
+                '.profile': ProfileFileLink,
+                '.gittxt': GitFileCompareLink,
+                }
+
+            if klasses.has_key (ext):
+                self.compare_general_files (klasses[ext], f1, f2)
+
+    def compare_general_files (self, klass, f1, f2):
+        # one link object per base file name
+        name = os.path.split (f1)[1]
+
+        file_link = klass (f1, f2)
+        self.file_links[name] = file_link
+
+    def compare_signature_files (self, f1, f2):
+        # Group the per-system NAME-<n>.signature files under a single
+        # SignatureFileLink keyed by the piece's base name.
+        name = os.path.split (f1)[1]
+        name = re.sub ('-[0-9]+.signature', '', name)
+
+        file_link = None
+        try:
+            file_link = self.file_links[name]
+        except KeyError:
+            # first signature seen for this piece: create its link lazily
+            generic_f1 = re.sub ('-[0-9]+.signature', '.ly', f1)
+            generic_f2 = re.sub ('-[0-9]+.signature', '.ly', f2)
+            file_link = SignatureFileLink (generic_f1, generic_f2)
+            self.file_links[name] = file_link
+
+        file_link.add_file_compare (f1, f2)
+
+    def write_changed (self, dest_dir, threshold):
+        """List the files whose distance exceeds THRESHOLD in
+        DEST_DIR/changed.txt (one base name per line)."""
+        (changed, below, unchanged) = self.thresholded_results (threshold)
+
+        str = '\n'.join ([os.path.splitext (link.file_names[1])[0]
+                          for link in changed])
+        fn = dest_dir + '/changed.txt'
+
+        open_write_file (fn).write (str)
+
+    def thresholded_results (self, threshold):
+        ## todo: support more scores.
+        # sort all links by distance, largest first
+        results = [(link.distance(), link)
+                   for link in self.file_links.values ()]
+        results.sort ()
+        results.reverse ()
+
+        unchanged = [r for (d,r) in results if d == 0.0]
+        below = [r for (d,r) in results if threshold >= d > 0.0]
+        changed = [r for (d,r) in results if d > threshold]
+
+        return (changed, below, unchanged)
+
+    def write_text_result_page (self, filename, threshold):
+        """Write a plain-text report to FILENAME ('' means stdout)."""
+        out = None
+        if filename == '':
+            out = sys.stdout
+        else:
+            print 'writing "%s"' % filename
+            out = open_write_file (filename)
+
+        (changed, below, unchanged) = self.thresholded_results (threshold)
+
+
+        for link in changed:
+            out.write (link.text_record_string ())
+
+        out.write ('\n\n')
+        out.write ('%d below threshold\n' % len (below))
+        out.write ('%d unchanged\n' % len (unchanged))
+
+    def create_text_result_page (self, dir1, dir2, dest_dir, threshold):
+        self.write_text_result_page (dest_dir + '/index.txt', threshold)
+
+    def create_html_result_page (self, dir1, dir2, dest_dir, threshold):
+        """Write the HTML report (index.html) and its per-link support
+        files into DEST_DIR."""
+        dir1 = dir1.replace ('//', '/')
+        dir2 = dir2.replace ('//', '/')
+
+        (changed, below, unchanged) = self.thresholded_results (threshold)
+
+
+        html = ''
+        old_prefix = os.path.split (dir1)[1]
+        for link in changed:
+            html += link.html_record_string (dest_dir)
+
+
+        short_dir1 = shorten_string (dir1)
+        short_dir2 = shorten_string (dir2)
+        html = '''<html>
+<table rules="rows" border bordercolor="blue">
+<tr>
+<th>distance</th>
+<th>%(short_dir1)s</th>
+<th>%(short_dir2)s</th>
+</tr>
+%(html)s
+</table>
+</html>''' % locals()
+
+        html += ('<p>')
+        below_count = len (below)
+
+        if below_count:
+            html += ('<p>%d below threshold</p>' % below_count)
+
+        html += ('<p>%d unchanged</p>' % len (unchanged))
+
+        dest_file = dest_dir + '/index.html'
+        open_write_file (dest_file).write (html)
+
+
+        # copy/link the images etc. each changed link refers to
+        for link in changed:
+            link.link_files_for_html (dest_dir)
+
+
+    def print_results (self, threshold):
+        self.write_text_result_page ('', threshold)
+
+def compare_trees (dir1, dir2, dest_dir, threshold):
+    """Compare output trees DIR1 and DIR2; print a summary and write
+    text/HTML reports to DEST_DIR (removed first if it exists)."""
+    data = ComparisonData ()
+    data.compare_trees (dir1, dir2)
+    data.read_sources ()
+
+
+    data.print_results (threshold)
+
+    if os.path.isdir (dest_dir):
+        system ('rm -rf %s '% dest_dir)
+
+    data.write_changed (dest_dir, threshold)
+    data.create_html_result_page (dir1, dir2, dest_dir, threshold)
+    data.create_text_result_page (dir1, dir2, dest_dir, threshold)
+
+################################################################
+# TESTING
+
+def mkdir (x):
+    # create X (and parents) unless it already exists, logging the action
+    if not os.path.isdir (x):
+        print 'mkdir', x
+        os.makedirs (x)
+
+def link_file (x, y):
+    """Hard-link X to Y, creating Y's directory first if needed."""
+    mkdir (os.path.split (y)[0])
+    try:
+        print x, '->', y
+        os.link (x, y)
+    except OSError, z:
+        print 'OSError', x, y, z
+        # NOTE(review): raises a fresh OSError, discarding Z's details
+        raise OSError
+
+def open_write_file (x):
+    # open X for writing, creating its directory first
+    d = os.path.split (x)[0]
+    mkdir (d)
+    return open (x, 'w')
+
+
+def system (x):
+    """Run shell command X; abort (assertion) on non-zero exit status."""
+    print 'invoking', x
+    stat = os.system (x)
+    assert stat == 0
+
+
+def test_paired_files ():
+    # smoke test for paired_files (); uses hard-coded developer paths
+    print paired_files (os.environ["HOME"] + "/src/lilypond/scripts/",
+                        os.environ["HOME"] + "/src/lilypond-stable/scripts/build/", '*.py')
+
+
+def test_compare_trees ():
+    """Assemble two artificial output trees (dir1/dir2) with known
+    differences from the files produced by test_basic_compare (), then
+    run the full tree comparison into compare-dir1dir2."""
+    system ('rm -rf dir1 dir2')
+    system ('mkdir dir1 dir2')
+    system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir1')
+    system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir2')
+    system ('cp 20expr{-*.signature,.ly,.png,.eps,.log,.profile} dir1')
+    system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir2/')
+    system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir1/')
+    system ('cp 19-1.signature 19.sub-1.signature')
+    system ('cp 19.ly 19.sub.ly')
+    system ('cp 19.profile 19.sub.profile')
+    system ('cp 19.log 19.sub.log')
+    system ('cp 19.png 19.sub.png')
+    system ('cp 19.eps 19.sub.eps')
+
+    system ('cp 20multipage* dir1')
+    system ('cp 20multipage* dir2')
+    system ('cp 19multipage-1.signature dir2/20multipage-1.signature')
+
+
+    system ('mkdir -p dir1/subdir/ dir2/subdir/')
+    system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir1/subdir/')
+    system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir2/subdir/')
+    system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir2/')
+    system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir1/')
+    system ('echo HEAD is 1 > dir1/tree.gittxt')
+    system ('echo HEAD is 2 > dir2/tree.gittxt')
+
+    ## introduce differences
+    system ('cp 19-1.signature dir2/20-1.signature')
+    system ('cp 19.profile dir2/20.profile')
+    system ('cp 19.png dir2/20.png')
+    system ('cp 19multipage-page1.png dir2/20multipage-page1.png')
+    system ('cp 20-1.signature dir2/subdir/19.sub-1.signature')
+    system ('cp 20.png dir2/subdir/19.sub.png')
+    system ("sed 's/: /: 1/g' 20.profile > dir2/subdir/19.sub.profile")
+
+    ## radical diffs.
+    system ('cp 19-1.signature dir2/20grob-1.signature')
+    system ('cp 19-1.signature dir2/20grob-2.signature')
+    system ('cp 19multipage.midi dir1/midi-differ.midi')
+    system ('cp 20multipage.midi dir2/midi-differ.midi')
+    system ('cp 19multipage.log dir1/log-differ.log')
+    system ('cp 19multipage.log dir2/log-differ.log && echo different >> dir2/log-differ.log && echo different >> dir2/log-differ.log')
+
+    compare_trees ('dir1', 'dir2', 'compare-dir1dir2', options.threshold)
+
+
+def test_basic_compare ():
+    """Render a set of small test scores with lilypond, then run the
+    signature cross-comparison over them (test_compare_signatures)."""
+    ly_template = r"""
+
+\version "2.10.0"
+#(define default-toplevel-book-handler
+  print-book-with-defaults-as-systems )
+
+#(ly:set-option (quote no-point-and-click))
+
+\sourcefilename "my-source.ly"
+
+%(papermod)s
+\header { tagline = ##f }
+\score {
+<<
+\new Staff \relative c {
+ c4^"%(userstring)s" %(extragrob)s
+ }
+\new Staff \relative c {
+ c4^"%(userstring)s" %(extragrob)s
+ }
+>>
+\layout{}
+}
+
+"""
+
+    # variants: identical, rescaled staff, different text, extra grob
+    dicts = [{ 'papermod' : '',
+               'name' : '20',
+               'extragrob': '',
+               'userstring': 'test' },
+             { 'papermod' : '#(set-global-staff-size 19.5)',
+               'name' : '19',
+               'extragrob': '',
+               'userstring': 'test' },
+             { 'papermod' : '',
+               'name' : '20expr',
+               'extragrob': '',
+               'userstring': 'blabla' },
+             { 'papermod' : '',
+               'name' : '20grob',
+               'extragrob': 'r2. \\break c1',
+               'userstring': 'test' },
+             ]
+
+    for d in dicts:
+        open (d['name'] + '.ly','w').write (ly_template % d)
+
+    names = [d['name'] for d in dicts]
+
+    system ('lilypond -ddump-profile -dseparate-log-files -ddump-signatures --png -dbackend=eps ' + ' '.join (names))
+
+
+    multipage_str = r'''
+ #(set-default-paper-size "a6")
+ \score {
+ \relative {c1 \pageBreak c1 }
+ \layout {}
+ \midi {}
+ }
+ '''
+
+    open ('20multipage.ly', 'w').write (multipage_str.replace ('c1', 'd1'))
+    open ('19multipage.ly', 'w').write ('#(set-global-staff-size 19.5)\n' + multipage_str)
+    system ('lilypond -dseparate-log-files -ddump-signatures --png 19multipage 20multipage ')
+
+    test_compare_signatures (names)
+
+def test_compare_signatures (names, timing=False):
+    """Cross-compare the first-system signatures of all NAMES.
+
+    With TIMING=True, repeat the work 100x and print average timings
+    instead of just checking results."""
+    import time
+
+    times = 1
+    if timing:
+        times = 100
+
+    t0 = time.clock ()
+
+    count = 0
+    for t in range (0, times):
+        # sigs survives the loop; only the last iteration's dict is used
+        sigs = dict ((n, read_signature_file ('%s-1.signature' % n)) for n in names)
+        count += 1
+
+    if timing:
+        print 'elapsed', (time.clock() - t0)/count
+
+
+    t0 = time.clock ()
+    count = 0
+    combinations = {}
+    for (n1, s1) in sigs.items():
+        for (n2, s2) in sigs.items():
+            combinations['%s-%s' % (n1, n2)] = SystemLink (s1,s2).distance ()
+            count += 1
+
+    if timing:
+        print 'elapsed', (time.clock() - t0)/count
+
+    results = combinations.items ()
+    results.sort ()
+    for k,v in results:
+        print '%-20s' % k, v
+
+    # sanity checks against the known test pieces
+    assert combinations['20-20'] == (0.0,0.0,0.0)
+    assert combinations['20-20expr'][0] > 0.0
+    assert combinations['20-19'][2] < 10.0
+    assert combinations['20-19'][2] > 0.0
+
+
+def run_tests ():
+    """Run the self tests inside test-output-distance/; the expensive
+    lilypond rendering happens only when the directory is fresh."""
+    dir = 'test-output-distance'
+
+    do_clean = not os.path.exists (dir)
+
+    print 'test results in ', dir
+    if do_clean:
+        system ('rm -rf ' + dir)
+        system ('mkdir ' + dir)
+
+    os.chdir (dir)
+    if do_clean:
+        test_basic_compare ()
+
+    test_compare_trees ()
+
+################################################################
+#
+
+def main ():
+    """Parse command-line options, then either run the self tests or
+    compare the two trees given as positional arguments."""
+    p = optparse.OptionParser ("output-distance - compare LilyPond formatting runs")
+    p.usage = 'output-distance.py [options] tree1 tree2'
+
+    p.add_option ('', '--test-self',
+                  dest="run_test",
+                  action="store_true",
+                  help='run test method')
+
+    p.add_option ('--max-count',
+                  dest="max_count",
+                  metavar="COUNT",
+                  type="int",
+                  default=0,
+                  action="store",
+                  help='only analyze COUNT signature pairs')
+
+    p.add_option ('', '--threshold',
+                  dest="threshold",
+                  default=0.3,
+                  action="store",
+                  type="float",
+                  help='threshold for geometric distance')
+
+    p.add_option ('--no-compare-images',
+                  dest="compare_images",
+                  default=True,
+                  action="store_false",
+                  help="Don't run graphical comparisons")
+
+    p.add_option ('--create-images',
+                  dest="create_images",
+                  default=False,
+                  action="store_true",
+                  help="Create PNGs from EPSes")
+
+
+    p.add_option ('--local-datadir',
+                  dest="local_data_dir",
+                  default=False,
+                  action="store_true",
+                  help='whether to use the share/lilypond/ directory in the test directory')
+
+    p.add_option ('-o', '--output-dir',
+                  dest="output_dir",
+                  default=None,
+                  action="store",
+                  type="string",
+                  help='where to put the test results [tree2/compare-tree1tree2]')
+
+    # options is read globally (e.g. by ComparisonData.compare_directories)
+    global options
+    (options, args) = p.parse_args ()
+
+    if options.run_test:
+        run_tests ()
+        sys.exit (0)
+
+    if len (args) != 2:
+        p.print_usage()
+        sys.exit (2)
+
+    name = options.output_dir
+    if not name:
+        # default output dir: tree2/compare-<tree1 with slashes stripped>
+        name = args[0].replace ('/', '')
+        name = os.path.join (args[1], 'compare-' + shorten_string (name))
+
+    compare_trees (args[0], args[1], name, options.threshold)
+
+if __name__ == '__main__':
+    main()
+
--- /dev/null
+#!@PYTHON@
+
+# Regex search-and-replace over files:
+#
+#   <script> REGEX REPLACEMENT [FILES...]
+#
+# REGEX is compiled with re.MULTILINE.  With no FILES (or '-' as the
+# first file) the substitution is applied to stdin and written to stdout.
+# Otherwise each FILE is rewritten in place (the original is kept as
+# FILE~~); in that mode REPLACEMENT is %-interpolated against locals (),
+# so it may use %(file)s, %(name)s, %(base)s and %(ext)s.
+
+import os
+import re
+import sys
+
+frm = re.compile (sys.argv[1], re.MULTILINE)
+to = sys.argv[2]
+
+if not sys.argv[3:] or sys.argv[3] == '-':
+    sys.stdout.write (re.sub (frm, to, sys.stdin.read ()))
+for file in sys.argv[3:]:
+    s = open (file).read ()
+    name = os.path.basename (file)
+    base, ext = os.path.splitext (name)
+    t = re.sub (frm, to % locals (), s)
+    # only touch the file when the substitution changed something
+    if s != t:
+        if 1:
+            # keep a backup, then rewrite the file with the new text
+            os.system ('mv %(file)s %(file)s~~' % locals ())
+            h = open (file, "w")
+            h.write (t)
+            h.close ()
+        else:
+            sys.stdout.write (t)
--- /dev/null
+#!@PYTHON@
+# -*- coding: utf-8 -*-
+# texi-gettext.py
+
+# USAGE: texi-gettext.py [-o OUTDIR] LANG FILES
+#
+# -o OUTDIR specifies that output files should rather be written in OUTDIR
+#
+
+print "texi_gettext.py"
+
+import sys
+import re
+import os
+import getopt
+
+import langdefs
+
+optlist, args = getopt.getopt (sys.argv[1:],'o:')
+lang = args[0]
+files = args[1:]
+
+outdir = '.'
+for x in optlist:
+    if x[0] == '-o':
+        outdir = x[1]
+
+# per-language settings from langdefs: punctuation separator and the
+# gettext-style translation function used below
+double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
+_doc = langdefs.translation[lang]
+
+# regexes for the Texinfo constructs whose text gets translated
+include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M)
+whitespaces = re.compile (r'\s+')
+ref_re = re.compile (r'(?ms)@(rglos|ruser|rprogram|ref)(\{)(.*?)(\})')
+node_section_re = re.compile (r'@(node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading)( )(.*?)(\n)')
+menu_entry_re = re.compile (r'\* (.*?)::')
+
+def title_gettext (m):
+    # brace-delimited titles (@ref{...}) may span lines: collapse whitespace
+    if m.group (2) == '{':
+        r = whitespaces.sub (' ', m.group (3))
+    else:
+        r = m.group (3)
+    return '@' + m.group (1) + m.group (2) + _doc (r) + m.group (4)
+
+def menu_entry_gettext (m):
+    # translate the node name of a '* NODE::' menu entry
+    return '* ' + _doc (m.group (1)) + '::'
+
+def include_replace (m, filename):
+    # redirect @include to the .pdftexi version when the included file exists
+    if os.path.exists (os.path.join (os.path.dirname (filename), m.group(1)) + '.texi'):
+        return '@include ' + m.group(1) + '.pdftexi'
+    return m.group(0)
+
+def process_file (filename):
+    """Translate FILENAME's section/menu/xref titles, write the result
+    as OUTDIR/FILENAME with a .pdftexi suffix, and recurse into the
+    files it @include's."""
+    print "Processing %s" % filename
+    f = open (filename, 'r')
+    page = f.read ()
+    f.close()
+    page = node_section_re.sub (title_gettext, page)
+    page = ref_re.sub (title_gettext, page)
+    page = menu_entry_re.sub (menu_entry_gettext, page)
+    page = page.replace ("""-- SKELETON FILE --
+When you actually translate this file, please remove these lines as
+well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", '')
+    page = page.replace ('UNTRANSLATED NODE: IGNORE ME', _doc ("This section has not been translated yet; please refer to the manual in English."))
+    includes = include_re.findall (page)
+    page = include_re.sub (lambda m: include_replace (m, filename), page)
+    # [:-4] strips 'texi' from the name, keeping the dot
+    p = os.path.join (outdir, filename) [:-4] + 'pdftexi'
+    f = open (p, 'w')
+    f.write (page)
+    f.close ()
+    dir = os.path.dirname (filename)
+    for file in includes:
+        p = os.path.join (dir, file) + '.texi'
+        if os.path.exists (p):
+            process_file (p)
+
+for filename in files:
+    process_file (filename)
--- /dev/null
+#!@PYTHON@
+
+# texi2omf: read metadata from a Texinfo file (@omf* commands, @title,
+# @author, ...) and print a ScrollKeeper OMF document on stdout.
+
+import getopt
+import os
+import re
+import sys
+import time
+
+def usage ():
+    sys.stderr.write ('''
+texi2omf [options] FILE.texi > FILE.omf
+
+Options:
+
+--format=FORM set format FORM (HTML, PS, PDF, [XML]).
+--location=FILE file name as installed on disk.
+--version=VERSION
+
+Use the following commands (enclose in @ignore)
+
+@omfsubject . .
+@omfdescription . .
+@omftype . .
+
+etc.
+
+
+''')
+
+(options, files) = getopt.getopt (sys.argv[1:], '',
+                                  ['format=', 'location=', 'version='])
+
+# defaults; MAILADDRESS/USERNAME identify the maintainer
+license = 'FDL'
+location = ''
+version = ''
+email = os.getenv ('MAILADDRESS')
+name = os.getenv ('USERNAME')
+format = 'xml'
+
+for (o, a) in options:
+    if o == '--format':
+        format = a
+    elif o == '--location':
+        location = 'file:%s' % a
+    elif o == '--version':
+        version = a
+    else:
+        assert 0
+
+
+if not files:
+    usage ()
+    sys.exit (2)
+
+
+# map format name -> MIME type for the <format> element
+formats = {
+    'html' : 'text/html',
+    'pdf' : 'application/pdf',
+    'ps.gz' : 'application/postscript',
+    'ps' : 'application/postscript',
+    'xml' : 'text/xml',
+    }
+
+if not formats.has_key (format):
+    sys.stderr.write ("Format `%s' unknown\n" % format)
+    sys.exit (1)
+
+
+infile = files[0]
+
+today = time.localtime ()
+
+texi = open (infile).read ()
+
+if not location:
+    # fall back to the input name with its extension swapped for FORMAT
+    location = 'file:/%s' % re.sub (r'\..*', '.' + format, infile)
+
+omf_vars = {
+    'date': '%d-%d-%d' % today[:3],
+    'mimeformat': formats[format],
+    'maintainer': "%s (%s)" % (name, email),
+    'version' : version,
+    'location' : location,
+    'language' : 'C',
+    }
+
+omf_caterories = ['subject', 'creator', 'maintainer', 'contributor',
+                  'title', 'subtitle', 'version', 'category', 'type',
+                  'description', 'license', 'language',]
+
+# '@omfCATEGORY VALUE' lines in the texi source override the defaults;
+# categories that appear nowhere end up as empty strings
+for a in omf_caterories:
+    m = re.search ('@omf%s (.*)\n'% a, texi)
+    if m:
+        omf_vars[a] = m.group (1)
+    elif not omf_vars.has_key (a):
+        omf_vars[a] = ''
+
+if not omf_vars['title']:
+    # no @omftitle: synthesize one from @title and @subtitle
+    title = ''
+    m = re.search ('@title (.*)\n', texi)
+    if m:
+        title = m.group (1)
+
+    subtitle = ''
+    m = re.search ('@subtitle (.*)\n', texi)
+    if m:
+        subtitle = m.group (1)
+
+    if subtitle:
+        title = '%s -- %s' % (title, subtitle)
+
+    omf_vars['title'] = title
+
+if not omf_vars['creator']:
+    m = re.search ('@author (.*)\n', texi)
+    if m:
+        omf_vars['creator'] = m.group (1)
+
+
+
+print r'''<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE omf PUBLIC "-//OMF//DTD Scrollkeeper OMF Variant V1.0//EN" "http://scrollkeeper.sourceforge.net/dtds/scrollkeeper-omf-1.0/scrollkeeper-omf.dtd">
+<omf>
+  <resource>
+    <creator>
+      %(creator)s
+    </creator>
+    <maintainer>
+      %(maintainer)s
+    </maintainer>
+    <title>
+      %(title)s
+    </title>
+    <date>
+      %(date)s
+    </date>
+    <version identifier="%(version)s" date="%(date)s" />
+    <subject category="%(category)s"/>
+    <description>
+    %(description)s
+    </description>
+    <type>
+    %(type)s
+    </type>
+    <format mime="%(mimeformat)s" />
+    <identifier url="%(location)s"/>
+    <language code="%(language)s"/>
+    <rights type="%(license)s" />
+  </resource>
+</omf>
+
+''' % omf_vars
+
+
--- /dev/null
+#!@PYTHON@
+
+## This is www_post.py. This script is the main stage
+## of toplevel GNUmakefile local-WWW-post target.
+
+# USAGE: www_post PACKAGE_NAME TOPLEVEL_VERSION OUTDIR TARGETS
+# please call me from top of the source directory
+
+import sys
+import os
+import re
+
+import langdefs
+
+import mirrortree
+import postprocess_html
+
+package_name, package_version, outdir, targets = sys.argv[1:]
+targets = targets.split (' ')
+outdir = os.path.normpath (outdir)
+doc_dirs = ['input', 'Documentation', outdir]
+target_pattern = os.path.join (outdir, '%s-root')
+
+# these redirection pages allow going back to the documentation index
+# from an HTML manuals/snippets page
+static_files = {
+    os.path.join (outdir, 'index.html'):
+    '''<META HTTP-EQUIV="refresh" content="0;URL=Documentation/index.html">
+<html><body>Redirecting to the documentation index...</body></html>\n''',
+    os.path.join (outdir, 'VERSION'):
+    package_version + '\n',
+    os.path.join ('input', 'lsr', outdir, 'index.html'):
+    '''<META HTTP-EQUIV="refresh" content="0;URL=../../index.html">
+<html><body>Redirecting to the documentation index...</body></html>\n'''
+    }
+
+# one redirection page per translated documentation index
+for l in langdefs.LANGUAGES:
+    static_files[os.path.join ('Documentation', 'user', outdir, l.file_name ('index', '.html'))] = \
+        '<META HTTP-EQUIV="refresh" content="0;URL=../' + l.file_name ('index', '.html') + \
+        '">\n<html><body>Redirecting to the documentation index...</body></html>\n'
+
+for f, contents in static_files.items ():
+    open (f, 'w').write (contents)
+
+sys.stderr.write ("Mirrorring...\n")
+dirs, symlinks, files = mirrortree.walk_tree (
+    tree_roots = doc_dirs,
+    process_dirs = outdir,
+    exclude_dirs = '(^|/)(' + r'|po|out|out-test|.*?[.]t2d|\w*?-root)(/|$)|Documentation/(' + '|'.join ([l.code for l in langdefs.LANGUAGES]) + ')',
+    find_files = r'.*?\.(?:midi|html|pdf|png|txt|i?ly|signature|css)$|VERSION',
+    exclude_files = r'lily-[0-9a-f]+.*\.(pdf|txt)')
+
+# actual mirrorring stuff: HTML pages are post-processed later, the rest
+# is hard-linked into each target root as-is
+html_files = []
+hardlinked_files = []
+for f in files:
+    if f.endswith ('.html'):
+        html_files.append (f)
+    else:
+        hardlinked_files.append (f)
+dirs = [re.sub ('/' + outdir, '', d) for d in dirs]
+while outdir in dirs:
+    dirs.remove (outdir)
+dirs = list (set (dirs))
+dirs.sort ()
+
+strip_file_name = {}
+strip_re = re.compile (outdir + '/')
+for t in targets:
+    out_root = target_pattern % t
+    # maps a mirrored path to its location under this target's root.
+    # NOTE(review): the lambda reads the module-level 't' at call time;
+    # this relies on the later loops rebinding 't' to the same target --
+    # confirm before reusing strip_file_name outside those loops.
+    strip_file_name[t] = lambda s: os.path.join (target_pattern % t, (strip_re.sub ('', s)))
+    os.mkdir (out_root)
+    map (os.mkdir, [os.path.join (out_root, d) for d in dirs])
+    for f in hardlinked_files:
+        os.link (f, strip_file_name[t] (f))
+    for l in symlinks:
+        p = mirrortree.new_link_path (os.path.normpath (os.readlink (l)), os.path.dirname (l), strip_re)
+        dest = strip_file_name[t] (l)
+        if not os.path.exists (dest):
+            os.symlink (p, dest)
+
+    ## ad-hoc renaming to make xrefs between PDFs work
+    os.rename (os.path.join (out_root, 'input/lsr/lilypond-snippets.pdf'),
+               os.path.join (out_root, 'Documentation/user/lilypond-snippets.pdf'))
+
+# need this for content negotiation with documentation index
+if 'online' in targets:
+    f = open (os.path.join (target_pattern % 'online', 'Documentation/.htaccess'), 'w')
+    f.write ('#.htaccess\nDirectoryIndex index\n')
+    f.close ()
+
+postprocess_html.build_pages_dict (html_files)
+for t in targets:
+    sys.stderr.write ("Processing HTML pages for %s target...\n" % t)
+    postprocess_html.process_html_files (
+        package_name = package_name,
+        package_version = package_version,
+        target = t,
+        name_filter = strip_file_name[t])
# We must invoke the generated $(outdir)/help2man script instead of
-# the help2man.pl source, which means that the buildscripts directory
+# the help2man.pl source, which means that the scripts/build directory
# must be built first.
#
# From the perlrun man-page:
# cases. Four more explaining what a line comment is, and that it may
# be parsed, same here.
-HELP2MAN_COMMAND = $(PERL) $(top-build-dir)/buildscripts/$(outbase)/help2man $< > $@
+HELP2MAN_COMMAND = $(buildscript-dir)/help2man $< > $@
ifeq ($(strip $(CROSS)),no)
-$(outdir)/%.1: $(outdir)/%
+$(outdir)/%.1: $(outdir)/% $(buildscript-dir)/help2man
$(HELP2MAN_COMMAND)
else
# When cross building, some manpages will not build because the
$(outdir)/%.1: out/%.1
cp $< $@
endif
+
+$(buildscript-dir)/help2man:
+ $(MAKE) -C $(depth)/scripts/build
TMP=`mktemp -d $(outdir)/pfbtemp.XXXXXXXXX` \
&& ( cd $$TMP \
&& ln -s ../mf2pt1.mem . \
- && MFINPUTS=$(top-src-dir)/mf:..:: $(PERL) $(top-src-dir)/buildscripts/mf2pt1.pl $(MF2PT1_OPTIONS) $< $(METAFONT_QUIET)) \
+ && MFINPUTS=$(top-src-dir)/mf:..:: $(buildscript-dir)/mf2pt1 $(MF2PT1_OPTIONS) $< $(METAFONT_QUIET)) \
&& mv $$TMP/*pfb $(outdir); \
rm -rf $$TMP
+++ /dev/null
-# Don't remove $(outdir)/.log's. Logs are a target!
-
-$(outdir)/%.0: %.mf $(outdir)/mfplain.mem
- -$(METAPOST) "&$(outdir)/mfplain \mode=lowres; \mag=1.0; nonstopmode; input $<"
-
-$(outdir)/mfplain.mem: $(MFPLAIN_MP)
- $(INIMETAPOST) $(INIMETAPOST_FLAGS) $(MFPLAIN_MP) dump
- mv mfplain.* $(outdir)
-
-$(outdir)/%.pfa: $(outdir)/%.0
- $(PYTHON) $(depth)/buildscripts/ps-to-pfa.py --output $(basename $<).pfa $<
- rm -f $(basename $(@F)).[0-9]*
- rm -f $(basename $(@F)).tfm $(basename $(@F)).log
-
+++ /dev/null
-
-pfa: $(PFA_FILES)
+++ /dev/null
-
-MP_PFA_FILES = $(addprefix $(outdir)/, $(FONT_FILES:.mf=.pfa))
-PFA_FILES += $(MP_PFA_FILES)
-
# $(outdir)/$(INFO_IMAGES_DIR)/*.png symlinks are only needed to view
# out-www/*.info with Emacs -- HTML docs no longer need these
# symlinks, see replace_symlinks_urls in
-# buildscripts/add_html_footer.py.
+# python/aux/postprocess_html.py.
# make dereferences symlinks, and $(INFO_IMAGES_DIR) is a symlink
# to $(outdir), so we can't use directly $(INFO_IMAGES_DIR) as a
# prerequisite, otherwise %.info are always outdated (because older
-# than $(outdir), hence this .dep file
+# than $(outdir)), hence this .dep file
$(outdir)/$(INFO_IMAGES_DIR).info-images-dir-dep: $(INFO_DOCS:%=$(outdir)/%.texi)
ifneq ($(INFO_IMAGES_DIR),)
ln -s $(outdir) $(INFO_IMAGES_DIR)
mkdir -p $(outdir)/$(INFO_IMAGES_DIR)
rm -f $(outdir)/$(INFO_IMAGES_DIR)/[a-f0-9][a-f0-9]
- cd $(outdir)/$(INFO_IMAGES_DIR) && $(PYTHON) $(top-src-dir)/buildscripts/mass-link.py symbolic .. . [a-f0-9][a-f0-9]
+ cd $(outdir)/$(INFO_IMAGES_DIR) && $(buildscript-dir)/mass-link symbolic .. . [a-f0-9][a-f0-9]
endif
touch $@
cp $< $@
$(XREF_MAPS_DIR)/%.xref-map: $(outdir)/%.texi
- $(PYTHON) $(buildscript-dir)/extract_texi_filenames.py -o $(XREF_MAPS_DIR) $<
+ $(buildscript-dir)/extract_texi_filenames -o $(XREF_MAPS_DIR) $<
$(outdir)/version.%: $(top-src-dir)/VERSION
OUTTXT_FILES += $(addprefix $(outdir)/,$(TEXI_FILES:.texi=.txt))
-GENERATE_OMF = $(PYTHON) $(buildscript-dir)/texi2omf.py --format $(1) --location $(webdir)/$(tree-dir)/out-www/$(notdir $(basename $@)) --version $(TOPLEVEL_VERSION) $< > $@
+GENERATE_OMF = $(buildscript-dir)/texi2omf --format $(1) --location $(webdir)/$(tree-dir)/out-www/$(notdir $(basename $@)) --version $(TOPLEVEL_VERSION) $< > $@
TEXINFO_PAPERSIZE_OPTION= $(if $(findstring $(PAPERSIZE),a4),,-t @afourpaper)
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'install', 'src_glob')
-sources = src_glob ('*.tex')
-install (sources, env['sharedir_package_version'] + '/tex')
LILYPOND_WORDS = $(outdir)/lilypond-words $(outdir)/lilypond-words.vim
LILYPOND_WORDS_DEPENDS =\
$(top-src-dir)/lily/lily-lexer.cc \
- $(buildscript-dir)/lilypond-words.py \
+ $(buildscript-dir)/lilypond-words \
$(top-src-dir)/scm/markup.scm \
$(top-src-dir)/ly/engraver-init.ly
done
-rmdir -p $(DESTDIR)$(vimdir)
+$(buildscript-dir)/lilypond-words:
+ make -C $(depth)/scripts/build
+
$(LILYPOND_WORDS):
- cd $(top-src-dir) && $(PYTHON) buildscripts/lilypond-words.py --words --vim --dir=$(top-build-dir)/vim/$(outconfbase)
+ cd $(top-src-dir) && $(buildscript-dir)/lilypond-words --words --vim --dir=$(top-build-dir)/vim/$(outconfbase)
all: $(LILYPOND_WORDS)
+++ /dev/null
-# -*-python-*-
-
-Import ('env', 'install', 'src_glob')
-sources = src_glob ('*.vim') + ['lilypond-words.vim']
-
-e = env.Copy ()
-a = '$PYTHON $srcdir/buildscripts/lilypond-words.py --words --vim --dir=${TARGET.dir}'
-e.Command ('lilypond-words.vim',
- ['#/lily/lily-lexer.cc',
- '#/buildscripts/lilypond-words.py',
- '#/scm/markup.scm',
- '#/ly/engraver-init.ly',],
- a)
-
-install (sources, env['sharedir_package_version'] + '/vim')