From: John Mandereau Date: Mon, 5 Jan 2009 09:33:19 +0000 (+0100) Subject: Merge branch 'lilypond/translation' of ssh://jomand@git.sv.gnu.org/srv/git/lilypond X-Git-Tag: release/2.12.2-1~32^2~17 X-Git-Url: https://git.donarmstrong.com/?a=commitdiff_plain;h=6140c6eb657080939fa4aef3d00d717bd85b5028;hp=a1a743e61326bcee9ae22c557610d52769bc32c5;p=lilypond.git Merge branch 'lilypond/translation' of ssh://jomand@git.sv.gnu.org/srv/git/lilypond --- diff --git a/Documentation/GNUmakefile b/Documentation/GNUmakefile index 0e5fc0a5e2..790b046047 100644 --- a/Documentation/GNUmakefile +++ b/Documentation/GNUmakefile @@ -36,6 +36,9 @@ $(OUT_HTML_FILES): $(OUT_CSS_FILES) $(outdir)/%.css: %.css ln -f $< $@ + +### Translations maintenance targets + po-update: make -C po po-update @@ -56,7 +59,7 @@ new-lang: cp fr/GNUmakefile $(ISOLANG) cp fr/user/GNUmakefile $(ISOLANG)/user sed -i -e 's/ISOLANG *= *fr/ISOLANG = $(ISOLANG)/' $(ISOLANG)/GNUmakefile $(ISOLANG)/user/GNUmakefile - $(PYTHON) $(buildscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) -o doc.pot --skeleton --gettext ../user/lilypond-learning.tely + $(auxscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) -o doc.pot --skeleton --gettext ../user/lilypond-learning.tely mv $(outdir)/*.*tely $(ISOLANG)/user msgmerge -U po/lilypond-doc.pot $(outdir)/doc.pot cp po/lilypond-doc.pot po/$(ISOLANG).po @@ -66,11 +69,11 @@ CHECKED_FILES = $(ISOLANG)/index.html.in $(shell find $(ISOLANG)/user/ -maxdepth TELY_FILES = $(call src-wildcard,$(ISOLANG)/user/*.tely) skeleton-update: - $(PYTHON) $(buildscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) --skeleton $(TELY_FILES:$(ISOLANG)/user/%.tely=../user/%.tely) - $(PYTHON) $(buildscript-dir)/texi-skeleton-update.py $(ISOLANG)/user $(outdir) + $(auxscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) --skeleton $(TELY_FILES:$(ISOLANG)/user/%.tely=../user/%.tely) + $(auxscript-dir)/texi-skeleton-update.py $(ISOLANG)/user $(outdir) snippet-update: - $(PYTHON) 
$(buildscript-dir)/update-snippets.py user $(ISOLANG)/user '*.itely' + $(auxscript-dir)/update-snippets.py user $(ISOLANG)/user '*.itely' DOCUMENTS_INCLUDES:=-I $(ISOLANG)/user \ -I $(top-build-dir)/Documentation/$(ISOLANG)/user/out-www \ @@ -88,22 +91,22 @@ DOCUMENTS_INCLUDES:=-I user \ endif # ISOLANG check-xrefs: - $(PYTHON) $(buildscript-dir)/check_texi_refs.py --batch \ - $(DOCUMENTS_INCLUDES) $(buildscript-dir)/manuals_definitions.py + $(auxscript-dir)/check_texi_refs.py --batch \ + $(DOCUMENTS_INCLUDES) $(auxpython-dir)/manuals_definitions.py fix-xrefs: - $(PYTHON) $(buildscript-dir)/check_texi_refs.py --auto-fix \ - $(DOCUMENTS_INCLUDES) $(buildscript-dir)/manuals_definitions.py + $(auxscript-dir)/check_texi_refs.py --auto-fix \ + $(DOCUMENTS_INCLUDES) $(auxpython-dir)/manuals_definitions.py check-translation: - $(PYTHON) $(buildscript-dir)/check_translation.py $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES) + $(auxscript-dir)/check_translation.py $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES) update-translation: - $(PYTHON) $(buildscript-dir)/check_translation.py --update $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES) + $(auxscript-dir)/check_translation.py --update $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES) translation-status: make -C po out=www messages - $(PYTHON) $(buildscript-dir)/translations-status.py + $(auxscript-dir)/translations-status.py local-help: extra-local-help diff --git a/Documentation/SConscript b/Documentation/SConscript deleted file mode 100644 index 5813e91711..0000000000 --- a/Documentation/SConscript +++ /dev/null @@ -1,5 +0,0 @@ -# -*-python-*- - -Import ('env') -env.AT_COPY ('index.html.in') - diff --git a/Documentation/TRANSLATION b/Documentation/TRANSLATION index 4651e05d6d..ada4bd7b15 100644 --- a/Documentation/TRANSLATION +++ b/Documentation/TRANSLATION @@ -93,7 +93,7 @@ Cd into Documentation and run: where is the ISO 639 language code. Add a language definition for your language in -buildscripts/langdefs.py. +python/langdefs.py. 
See next section about what files to translate and the following detailed instructions after the next section. @@ -670,25 +670,38 @@ which works regardless of the branch checked out. TECHNICAL BACKGROUND A number of Python scripts handle a part of the documentation -translation process. All are located in buildscripts/, except -langdefs.py which is in python/ +translation process. +All scripts used to maintain the translations +are located in scripts/aux/: -* buildlib.py -- module containing common functions (read piped output -of a shell command, use Git) -* langdefs.py -- language definitions module * check_translation.py -- show diff to update a translation * texi-langutils.py -- quickly and dirtily parse Texinfo files to make message catalogs and Texinfo skeleton files * texi-skeleton-update.py -- update Texinfo skeleton files +* update-snippets.py -- synchronize ly snippets with those from +English docs +* translations-status.py -- update translations status pages and word +counts in the file you are reading. +* tely-gettext.py -- gettext node names, section titles and references +in the sources; WARNING only use this script when support for +"makeinfo --html" has been dropped. + +Other scripts are used in the build process, in scripts/build/: * html-gettext.py -- translate node names, section titles and cross references in HTML files generated by makeinfo -* add_html_footer.py (module imported by www_post.py) -- add footer and -tweak links in HTML pages * texi-gettext.py -- gettext node names, section titles and references before calling texi2pdf * mass-link.py -- link or symlink files between English documentation and documentation in other languages -* update-snippets.py -- synchronize ly snippets with those from -English docs -* translations-status.py -- update translations status pages and word -counts in the file you are reading. 
+ +Python modules used by scripts in scripts/aux/ or scripts/build/ (but +not by installed Python scripts) are located in python/aux/: +* manuals_definitions.py -- define manual names and name of +cross-reference Texinfo macros +* buildlib.py -- common functions (read piped output +of a shell command, use Git) +* postprocess_html.py (module imported by www_post.py) -- add footer and +tweak links in HTML pages + +And finally +* python/langdefs.py -- language definitions module diff --git a/Documentation/bibliography/GNUmakefile b/Documentation/bibliography/GNUmakefile index 89eb2d0bcb..798dad58a6 100644 --- a/Documentation/bibliography/GNUmakefile +++ b/Documentation/bibliography/GNUmakefile @@ -27,7 +27,7 @@ $(outdir)/%.bib: %.bib ln -f $< $@ $(outdir)/%.html: %.bib - BSTINPUTS=$(src-dir) $(PYTHON) $(buildscript-dir)/bib2html.py -o $@ $< + BSTINPUTS=$(src-dir) $(buildscript-dir)/bib2html -o $@ $< local-clean: rm -f fonts.aux fonts.log feta*.tfm feta*.*pk diff --git a/Documentation/bibliography/SConscript b/Documentation/bibliography/SConscript deleted file mode 100644 index 538cfbd3d4..0000000000 --- a/Documentation/bibliography/SConscript +++ /dev/null @@ -1,12 +0,0 @@ -# -*-python-*- - -Import ('env', 'src_glob') -bib = src_glob ('*.bib') -env.AT_COPY ('index.html.in') - -# todo: must make html-long.bst as source too. -# make as source? - -map (env.BIB2HTML, bib) -env.Alias ('doc', bib) - diff --git a/Documentation/fr/user/editorial.itely b/Documentation/fr/user/editorial.itely index 85c32f5ef5..0440dfeb9a 100644 --- a/Documentation/fr/user/editorial.itely +++ b/Documentation/fr/user/editorial.itely @@ -153,6 +153,11 @@ d'indiquer des doigtés très proches des têtes de notes. 4 @end lilypond +@snippets + +@lilypondfile[verbatim,lilyquote,texidoc,doctitle] +{avoiding-collisions-of-chord-fingering-with-beams.ly} + @seealso Référence du programme : @rinternals{Fingering}. 
diff --git a/Documentation/pictures/GNUmakefile b/Documentation/pictures/GNUmakefile index 438389f944..35ef738ae4 100644 --- a/Documentation/pictures/GNUmakefile +++ b/Documentation/pictures/GNUmakefile @@ -12,7 +12,7 @@ include $(depth)/make/stepmake.make ifeq ($(PLATFORM_WINDOWS),yes) $(outdir)/%.ico: %.xpm - $(PYTHON) $(buildscript-dir)/genicon.py $< $@ + $(buildscript-dir)/genicon $< $@ default: $(lilypond-icon) $(ly-icon) diff --git a/Documentation/topdocs/SConscript b/Documentation/topdocs/SConscript deleted file mode 100644 index 7ef165cd93..0000000000 --- a/Documentation/topdocs/SConscript +++ /dev/null @@ -1,14 +0,0 @@ -# -*-python-*- - -Import ('env', 'src_glob', 'install') - -tely = src_glob ('*.tely') -texi = src_glob ('*.texi') + map (env.TEXI, tely) - -txt = map (env.TXT, texi) -html = map (env.HTML, texi) - -env.Alias ('doc', txt) -env.Alias ('doc', html) - -install (txt, env['sharedir_doc_package']) diff --git a/Documentation/user/SConscript b/Documentation/user/SConscript deleted file mode 100644 index 5ef1efdd3d..0000000000 --- a/Documentation/user/SConscript +++ /dev/null @@ -1,83 +0,0 @@ -# -*-python-*- - -import os -import string - -Import ('env', 'base_glob', 'src_glob') -tely = base_glob ('*.tely') -png = src_glob ('*.png') + map (env.EPS2PNG, base_glob ('*.eps')) - -# We need lily and mf to build these. 
-env.Depends ('lilypond.texi', ['#/lily', '#/mf', '#/python']) -env.Depends ('music-glossary.texi', ['#/lily', '#/mf', '#/python']) - -env.Depends ('lilypond.texi', 'lilypond-internals.texi') - -eps = src_glob ('*.eps') + map (env.PNG2EPS, base_glob ('*.png')) -env.Depends ('lilypond.texi', eps + png) - -lilypond_book_flags = '''--format=$LILYPOND_BOOK_FORMAT --process="lilypond -I$srcdir/input/manual/ $__verbose --backend=eps --formats=ps,png --header=texidoc -dcheck-internal-types -ddump-signatures -danti-alias-factor=2 -dgs-load-fonts" ''' -e = env.Copy ( -# LILYPOND_BOOK_FLAGS = '''--process="lilypond --backend=eps --formats=ps,png --header=texidoc -I#/input/manual -e '(ly:set-option (quote internal-type-checking) \#t)'"''', - LILYPOND_BOOK_FLAGS = lilypond_book_flags, - __verbose = ' --verbose', - GENERATE_DOCUMENTATION = '$srcdir/ly/generate-documentation', - ## TEXI2DVI_FLAGS = ['-I#Documentation/user'], - ) - -e.Command ('lilypond-internals.texi', ['#/lily', '#/mf', '#/python'], - 'cd ${TARGET.dir} && $LILYPOND $GENERATE_DOCUMENTATION') - -## FIXME: implicit steps from [TE]LY -> PDF -texi = map (env.TEXI, tely) -dvi = map (env.TEXIDVI, tely) -ps = map (env.DVIPS, tely) ###map (lambda x: x + '.dvi', tely)) -dvipdf = map (env.DVIPDF, tely) -pdf = map (env.PSPDF, dvipdf) - -# FIXME: install -info = map (env.INFO, tely) - -def file_subst (file_name, find, subst): - s = open (file_name).read () - t = string.replace (s, find, subst) - if s != t: - os.rename (file_name, file_name + '~') - h = open (file_name, "w") - h.write (t) - h.close () - -e['usersrc'] = Dir ('.').srcnode ().abspath -e['userout'] = Dir ('.').abspath - -a = ['$MAKEINFO -I$usersrc -I${SOURCE.dir} --html \ - --css-include=$srcdir/Documentation/texinfo.css $__verbose \ - --output=${TARGET.dir} $SOURCE', - 'ln -f ${SOURCE.dir}/*.png ${SOURCE.dir}/*.ly ${TARGET.dir}/',] - -e.Command ('lilypond/index.html', 'lilypond.texi', a) -e.Command ('lilypond-internals/index.html', 'lilypond-internals.texi', a) 
-e.Command ('music-glossary/index.html', 'music-glossary.texi', a) - -a = ['$MAKEINFO -I$usersrc -I${SOURCE.dir} --html \ - --no-split --no-headers \ - --css-include=$srcdir/Documentation/texinfo.css $__verbose \ - --output=$TARGET $SOURCE'] - -e.Command ('lilypond.html', 'lilypond.texi', a) -e.Command ('lilypond-internals.html', 'lilypond-internals.texi', a) -e.Command ('music-glossary.html', 'music-glossary.texi', a) - -#Hmm -- why not just mv ./Documentation/{*,*/*} ./doc :-) -env.Alias ('doc', texi) -env.Alias ('doc', dvi) -env.Alias ('doc', ps) -env.Alias ('doc', pdf) - -env.Alias ('doc', 'lilypond/index.html') -env.Alias ('doc', 'lilypond-internals/index.html') -env.Alias ('doc', 'lilypond.html') -env.Alias ('doc', 'lilypond-internals.html') - -# install ('lilypond/*', env['sharedir_doc_package'] + '/html') -# install ('lilypond-user/*', env['sharedir_doc_package'] + '/html') diff --git a/Documentation/user/fundamental.itely b/Documentation/user/fundamental.itely index 355a338746..4c58135bae 100644 --- a/Documentation/user/fundamental.itely +++ b/Documentation/user/fundamental.itely @@ -2008,7 +2008,7 @@ should be enclosed in double quotation signs, as above, although we shall see later that text can actually be specified in a much more general way by using the very powerful @code{markup} command. -@unnumberedsubsubsec Setting context properties with @code{\with} +@subsubheading Setting context properties with @code{\with} @funindex \with @funindex with @@ -2054,7 +2054,7 @@ value of the font size. If it is later changed with @code{\set}, this new default value may be restored with the @code{\unset fontSize} command. 
-@unnumberedsubsubsec Setting context properties with @code{\context} +@subsubheading Setting context properties with @code{\context} @cindex context properties, setting with \context @funindex \context diff --git a/Documentation/user/install.itely b/Documentation/user/install.itely index 1b2f4df6ef..597aeb6900 100644 --- a/Documentation/user/install.itely +++ b/Documentation/user/install.itely @@ -479,11 +479,11 @@ CVS; especially the following patch: For checking the coverage of the test suite, do the following @example -./buildscripts/build-coverage.sh +./scripts/aux/build-coverage.sh @emph{# uncovered files, least covered first} -python ./buildscripts/coverage.py --summary out-cov/*.cc +./scripts/aux/coverage.py --summary out-cov/*.cc @emph{# consecutive uncovered lines, longest first} -python ./buildscripts/coverage.py --uncovered out-cov/*.cc +./scripts/aux/coverage.py --uncovered out-cov/*.cc @end example diff --git a/Documentation/user/music-glossary.tely b/Documentation/user/music-glossary.tely index 6e779da344..33ed1d902e 100644 --- a/Documentation/user/music-glossary.tely +++ b/Documentation/user/music-glossary.tely @@ -33,7 +33,7 @@ @c Fixes by Jean-Pierre Coulon and `Dirk', alphabetized by last name, KK, 10/07 @c Updates to the German translation by Till Rettig, 12/07 -Copyright @copyright{} 1999--2008 by the authors +Copyright @copyright{} 1999--2009 by the authors Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.1 @@ -163,7 +163,7 @@ The list is rather long ... @end ignore @* -Copyright 1999--2008 by the authors +Copyright 1999--2009 by the authors Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.1 @@ -227,6 +227,7 @@ Languages in this order. * accidental:: * adagio:: * al niente:: +* alla breve:: * allegro:: * alteration:: * alto:: @@ -290,6 +291,7 @@ Languages in this order. 
* crescendo:: * cue-notes:: * custos:: +* cut time:: * D:: * da capo:: * dal niente:: @@ -394,6 +396,7 @@ Languages in this order. * lyrics:: * major:: * major interval:: +* maxima:: * meantone temperament:: * measure:: * measure repeat:: @@ -402,6 +405,7 @@ Languages in this order. * melisma line:: * melodic cadence:: * mensural notation:: +* mensuration sign:: * meter:: * metronome:: * metronome mark:: @@ -472,6 +476,7 @@ Languages in this order. * scordatura:: * score:: * second:: +* semibreve:: * semitone:: * seventh:: * sextolet:: @@ -561,7 +566,6 @@ Languages in this order. @item FI: A, a @end itemize - @seealso @ref{Pitch names}. @@ -578,13 +582,13 @@ DK: ?, S: ?, FI: kahdelle. -Abbreviated @notation{a2} or @notation{a 2}. +Abbreviated @notation{a2} or @notation{a 2}. In orchestral scores, @notation{a +due} indicates that: @enumerate -@item An indication in orchestral scores that a single part notated on a single -staff that normally carries parts for two players (e.g. first and second oboes) -is to be played by both players. +@item A single part notated on a single staff that normally carries parts for +two players (e.g. first and second oboes) is to be played by both players. @item Or conversely, that two pitches or parts notated on a staff that normally carries a single part (e.g. first violin) are to be played by different players, @@ -592,9 +596,8 @@ or groups of players (@q{desks}). @end enumerate - @seealso -None yet. +No cross-references. @node accelerando @@ -609,13 +612,12 @@ DK: accelerando, S: accelerando, FI: accelerando, kiihdyttäen. -[Italian: @q{speed up, accelerate}.] +[Italian: @q{speed up, accelerate}] Increase tempo - @seealso -None yet. +No cross-references. @node accent @@ -632,9 +634,8 @@ FI: aksentti, korostus. The stress of one tone over others. - @seealso -None yet. +No cross-references. @node accessory @@ -647,12 +648,20 @@ None yet. 
@node acciaccatura @section acciaccatura +ES: ?, +I: acciaccatura, +F: ?, +D: ?, +NL: ?, +DK: ?, +S: ?, +FI: ?. + A grace note which takes its time from the rest or note preceding the principal note to which it is attached. The acciaccatura is drawn as a small eighth note (quaver) with a line drawn through the flag and stem. - @seealso @ref{appoggiatura}, @ref{grace notes}, @ref{ornament}. @@ -675,17 +684,17 @@ An accidental alters a note by: @item Raising its pitch: @itemize -@item A @notation{double sharp}, by two semitones (a whole tone) -@item A @notation{sharp}, by one semitone +@item By two semitones (a whole tone)—@notation{double sharp} +@item By one semitone—@notation{sharp} @end itemize @item Lowering its pitch: @itemize -@item A @notation{flat}, by one semitone -@item A @notation{double flat}, by two semitones (a whole tone) +@item By one semitone—@notation{flat} +@item By two semitones (a whole tone)—@notation{double flat} @end itemize -@item Canceling the effects of the key signature or previous accidentals. +@item Or canceling the effects of the key signature or previous accidentals. @end itemize @lilypond[quote,notime] @@ -717,7 +726,6 @@ An accidental alters a note by: } @end lilypond - @seealso @ref{alteration}, @ref{semitone}, @ref{whole tone}. @@ -734,7 +742,7 @@ DK: adagio, S: adagio, FI: adagio, hitaasti. -[Italian: @q{comfortable, easy}.] +[Italian: @q{comfortable, easy}] @itemize @@ -746,7 +754,6 @@ of sonatas, symphonies, etc. @end itemize - @seealso @ref{andante}, @ref{largo}, @ref{sonata}. @@ -763,7 +770,7 @@ DK: ?, S: ?, FI: häviten olemattomiin. -[Italian: @q{to nothing}.] Used with @notation{decrescendo} to indicate +[Italian: @q{to nothing}] Used with @notation{decrescendo} to indicate that the sound should fade away to nothing. 
@notation{Al niente} is indicated by circling the tip of the hairpin: @@ -790,9 +797,37 @@ Since one does not crescendo @emph{to} nothing, it is not correct to use @notation{al niente} with @notation{crescendo}. Instead, one should use @emph{dal niente} (@notation{@b{from} nothing}). +@seealso +@ref{crescendo}, @ref{dal niente}, @ref{decrescendo}, @ref{hairpin}. + + +@node alla breve +@section alla breve + +ES: ?, +I: ?, +F: alla breve, +D: ?, +NL: ?, +DK: ?, +S: ?, +FI: ?. + +[Italian: @q{on the breve}] Twice as fast as the notation indicates. + +Also called @notation{in cut-time}. The name derives from mensural notation, +where the @notation{tactus} (or beat) is counted on the semibreve (the modern +whole note). Counting @q{on the breve} shifts the tactus to the next longest +note value, which (in modern usage) effectively halves all note values. + +(In mensural notation, breves and semibreves can have a ternary relationship, in +which case @notation{alla breve} means thrice (not twice) as fast. In practice, +this complication may not have mattered, since Gaffurius's system of multiplex +proportions makes it easy to explicitly state which proportion is needed.) @seealso -@ref{crescendo}, @ref{decrescendo}, @ref{hairpin}. +@ref{breve}, @ref{hemiola}, @ref{mensural notation}, @ref{note value}, +@ref{proportion}, @ref{whole note}. @node allegro @@ -807,9 +842,8 @@ DK: allegro, S: allegro, FI: allegro, nopeasti. -[Italian: @q{cheerful}.] Quick tempo. Also used as a title for pieces in a quick -tempo, especially the first and last movements of a sonata. - +[Italian: @q{cheerful}] Quick tempo. Also used as a title for pieces in a +quick tempo, especially the first and last movements of a sonata. @seealso @ref{sonata}. @@ -830,6 +864,7 @@ FI: muunnettu. An alteration is the modification, raising or lowering, of a note's pitch. It is established by an accidental. +@c TODO: add second meaning from mensural notation @seealso @ref{accidental}. 
@@ -852,7 +887,6 @@ was a high male voice (hence the name), which by the use of falsetto reached the height of the female voice. This type of voice is also known as countertenor. - @seealso @ref{countertenor}. @@ -871,7 +905,6 @@ FI: alttoavain. C clef setting middle C on the middle line of the staff. - @seealso @ref{C clef}. @@ -893,9 +926,8 @@ Denotes a range of pitches for a given voice in a part of music. It may also denote the pitch range that a musical instrument is capable of playing. Sometimes anglicized to @emph{ambit} (pl. @emph{ambits}). - @seealso -None yet. +No cross-references. @node anacrusis @@ -911,7 +943,7 @@ S: upptakt, FI: kohotahti. An anacrusis (also known as pickup or upbeat) is an incomplete measure -of music before a section of music. It also refers to the initial +of music before a section of music. It also refers to the initial note(s) of a melody occurring in that incomplete measure. @lilypond[quote,relative=1] @@ -924,7 +956,6 @@ bes4. a8 bes4 c f,2. \bar "||" @end lilypond - @seealso @ref{measure}, @ref{meter}. @@ -964,13 +995,12 @@ DK: andante, S: andante, FI: andante, käyden. -[Italian: present participle of @emph{andare}, @q{to walk}.] +[Italian: present participle of @emph{andare}, @q{to walk}] Walking tempo/character. - @seealso -None yet. +No cross-references. @node appoggiatura @@ -1031,9 +1061,8 @@ An appoggiatura may have more notes preceding the main note. >> @end lilypond - @seealso -None yet. +No cross-references. @node arpeggio @@ -1048,7 +1077,7 @@ DK: arpeggio, akkordbrydning, S: arpeggio, FI: arpeggio, murtosointu. -[Italian: @q{harp-like, played like a harp}.] +[Italian: @q{harp-like, played like a harp}] @lilypond[quote,line-width=13\cm] \new PianoStaff << @@ -1081,9 +1110,8 @@ FI: arpeggio, murtosointu. >> @end lilypond - @seealso -None yet. +No cross-references. @node articulation @@ -1102,9 +1130,8 @@ Articulation refers to notation which indicates how a note or notes should be played. 
Slurs, accents, staccato, and legato are all examples of articulation. - @seealso -None yet. +No cross-references. @node ascending interval @@ -1121,9 +1148,8 @@ FI: nouseva intervalli. A distance between a starting lower note and a higher ending note. - @seealso -None yet. +No cross-references. @node augmented interval @@ -1138,7 +1164,6 @@ DK: forstørret interval, S: överstigande intervall, FI: ylinouseva intervalli. - @seealso @ref{interval}. @@ -1159,7 +1184,6 @@ FI: aika-arvojen pidentäminen. This is a placeholder for augmentation (wrt mensural notation). - @seealso @ref{diminution}, @ref{mensural notation}. @@ -1186,9 +1210,8 @@ emulate engraving. This required more skill than did engraving. @end itemize - @seealso -None yet. +No cross-references. @node B @@ -1205,7 +1228,6 @@ None yet. @item FI: H, h @end itemize - @seealso @ref{H}, @ref{Pitch names} @@ -1213,7 +1235,6 @@ None yet. @node backfall @section backfall - @seealso @ref{appoggiatura}. @@ -1221,7 +1242,6 @@ None yet. @node bar @section bar - @seealso @ref{measure}. @@ -1243,7 +1263,6 @@ separates measures. Used very infrequently during the Renaissance (mostly in secular music, or in sacred music to indicate congruences between parts in otherwise-unmetered music). - @seealso @ref{measure}. @@ -1264,7 +1283,6 @@ The male voice intermediate in pitch between the bass and the tenor. @c F: clef de troisième ligne dropped - @seealso @ref{bass}, @ref{tenor}. @@ -1283,7 +1301,6 @@ FI: baritoniavain. C or F clef setting middle C on the upper staff line. - @seealso @ref{C clef}, @ref{F clef}. @@ -1309,7 +1326,6 @@ double bass. @end itemize - @seealso @ref{strings}. @@ -1328,7 +1344,6 @@ FI: bassoavain. A clef setting with middle C on the first top ledger line. - @seealso @ref{F clef}. @@ -1355,7 +1370,6 @@ g32-"1/32"[ s g s g s g] s16 g64-"1/64"[ s32 g64 s32 g64 s32 g64] s32 @end lilypond - @seealso @ref{feathered beam}. @@ -1384,7 +1398,6 @@ g4 c b a | g1 \bar "||" g8 d' c | b c a | g4. 
\bar "||" @end lilypond - @seealso @ref{time signature}. @@ -1392,7 +1405,6 @@ g8 d' c | b c a | g4. \bar "||" @node beat repeat @section beat repeat - @seealso @ref{percent repeat}. @@ -1400,7 +1412,6 @@ g8 d' c | b c a | g4. \bar "||" @node bind @section bind - @seealso @ref{tie}. @@ -1461,9 +1472,8 @@ Angular brackets for connecting parts in an orchestral or choral score: >> @end lilypond - @seealso -None yet. +No cross-references. @node bracket @@ -1478,7 +1488,6 @@ DK: ?, S: ?, FI: sulkumerkki. - @seealso @ref{brace} @@ -1499,9 +1508,8 @@ A family of blown musical instruments made of brass, all using a cup formed mouth piece. The brass instruments commonly used in a symphony orchestra are trumpet, trombone, french horn, and tuba. - @seealso -None yet. +No cross-references. @node breath mark @@ -1518,7 +1526,6 @@ FI: hengitysmerkki. Indication of where to breathe in vocal and wind instrument parts. - @seealso @ref{caesura}. @@ -1527,28 +1534,29 @@ Indication of where to breathe in vocal and wind instrument parts. @section breve @itemize -@item US: breve, double-whole note, -@item ES: cuadrada, breve, -@item I: breve, -@item F: brève, -@item D: Brevis, -@item NL: brevis, -@item DK: brevis, -@item S: brevis, -@item FI: brevis, kaksoiskokonuotti. +@item US: breve, double-whole note +@item ES: cuadrada, breve +@item I: breve +@item F: brève +@item D: Brevis +@item NL: brevis +@item DK: brevis +@item S: brevis +@item FI: brevis, kaksoiskokonuotti @end itemize -Note value twice as long as a whole note. Mainly used in pre-1650 music. -The shortest note value generally used in white mensural notation, hence the -name, which originally meant @q{of short duration}. +Note value: twice the length of a @notation{whole note} (@notation{semibreve}). + +Mainly used in music from before 1650. In mensural notation, it was a note +of fairly short duration—hence the name, which is Latin for @q{short} or +@q{of short duration}. 
@lilypond[quote,notime,relative=2] g\breve @end lilypond - @seealso -@ref{mensural notation}, @ref{note value}. +@ref{mensural notation}, @ref{note value}, @ref{semibreve}. @node C @@ -1565,7 +1573,6 @@ g\breve @item FI: C, c @end itemize - @seealso @ref{Pitch names}. @@ -1600,9 +1607,8 @@ lines. } @end lilypond - @seealso -None yet. +No cross-references. @node cadence @@ -1617,7 +1623,6 @@ DK: kadence, S: kadens, FI: kadenssi, lopuke. - @seealso @ref{harmonic cadence}, @ref{functional harmony}. @@ -1640,9 +1645,8 @@ chance to exhibit their technical skill and -- not last -- their ability to improvise. Since the middle of the 19th century, however, most cadenzas have been written down by the composer. - @seealso -None yet. +No cross-references. @node caesura @@ -1657,12 +1661,11 @@ DK: ?, S: ?, FI: välimerkki. -[Latin: from the supine of @emph{caedere} @q{to cut down}.] +[Latin: from the supine of @emph{caedere} @q{to cut down}] The break between two musical phrases, sometimes (but not always) marked by a rest or a breath mark. - @seealso @ref{breath mark}. @@ -1679,7 +1682,6 @@ DK: kanon, S: kanon, FI: kaanon, tarkka jäljittely. - @seealso @ref{counterpoint}. @@ -1700,7 +1702,6 @@ viritysjärjestelmässä. Logarithmic unit of measurement. 1@tie{}cent is 1/1200 of an octave (1/100 of an equally tempered semitone). - @seealso @ref{equal temperament}, @ref{semitone}. @@ -1708,7 +1709,6 @@ Logarithmic unit of measurement. 1@tie{}cent is 1/1200 of an octave @node central C @section central C - @seealso @ref{middle C}. @@ -1726,9 +1726,9 @@ S: ackord, FI: sointu. Three or more tones sounding simultaneously. In traditional European music -the base chord is a @emph{triad} consisting of two thirds. @emph{Major} +the base chord is a @emph{triad} consisting of two thirds. @emph{Major} (major + minor third) as well as @emph{minor} (minor + major third) chords -may be extended with more thirds. Four-tone @emph{seventh chords} and +may be extended with more thirds. 
Four-tone @emph{seventh chords} and five-tone @emph{ninth} major chords are most often used as dominants (functional harmony). Chords having no third above the lower notes to define their mood are a special case called @q{open chords}. The lack of @@ -1759,7 +1759,6 @@ minor. >> @end lilypond - @seealso @ref{functional harmony}, @ref{interval}, @ref{inversion}, @ref{quality}, @ref{third}. @@ -1783,7 +1782,6 @@ A scale consisting of all 12 semitones. c1 cis d dis e f fis g gis a ais b c @end lilypond - @seealso @ref{semitone}. @@ -1802,7 +1800,6 @@ FI: kromatiikka. Using tones extraneous to a diatonic scale (minor, major). - @seealso @ref{diatonic scale}. @@ -1819,7 +1816,6 @@ DK: kirketoneart, S: kyrkotonart, FI: moodi, kirkkosävellaji. - @seealso @ref{diatonic scale}. @@ -1855,7 +1851,7 @@ pitches. The three clef symbols in common use are: \musicglyph #"clefs.F" \strut \musicglyph #"clefs.C" - } + } } @end lilypond @@ -2043,7 +2039,6 @@ major chord. } @end lilypond - @seealso @ref{C clef}, @ref{F clef}, @ref{G clef}. @@ -2086,9 +2081,8 @@ pitch contained in the cluster would be notated as an ordinary note. \makeClusters { 4 8 } @end lilypond - @seealso -None yet. +No cross-references. @node comma @@ -2106,7 +2100,6 @@ FI: komma, korvinkuultava ero äänenkorkeudessa. Difference in pitch between a note derived from pure tuning and the same note derived from some other tuning method. - @seealso @ref{didymic comma}, @ref{Pythagorean comma}, @ref{syntonic comma}, @ref{temperament}. @@ -2136,7 +2129,6 @@ FI: C-merkintä. 4/4 time. The symbol, which resembles a capital letter C, comes from mensural notation. - @seealso @ref{mensural notation}, @ref{meter}. @@ -2153,7 +2145,6 @@ DK: komplementærinterval, S: komplementärintervall (?), FI: täydentävä intervalli. - @seealso @ref{inverted interval}. @@ -2172,7 +2163,6 @@ FI: oktaavia laajempi intervalli. Intervals larger than an octave. - @seealso @ref{interval}. @@ -2192,7 +2182,6 @@ FI: kolmijakoinen tahtilaji. 
A meter that includes a triplet subdivision within the beat, such as 6/8, 9/8, 12/8. - @seealso @ref{meter}, @ref{simple meter}. @@ -2248,7 +2237,6 @@ A time signature that additively combines two or more unequal meters, e.g., } @end lilypond - @seealso @ref{compound meter}, @ref{meter}, @ref{polymetric time signature}. @@ -2317,7 +2305,6 @@ are, technically speaking, @emph{transposing instruments}: @end itemize - @seealso @ref{transposing instrument}. @@ -2343,7 +2330,6 @@ Progressing melodically by intervals of a second, as contrasted with g4 g g a | b2 a | g4 b a a | g1 \bar "||" @end lilypond - @seealso @ref{disjunct movement}. @@ -2360,7 +2346,6 @@ DK: konsonans, S: konsonans, FI: konsonanssi, sopusointi. - @seealso @ref{harmony}. @@ -2377,7 +2362,6 @@ DK: alt, S: alt, FI: kontra-altto. - @seealso @ref{alto}. @@ -2393,9 +2377,8 @@ music typesetters. @c Copying music required more skill than engraving. Flagged for NPOV - @seealso -None yet. +No cross-references. @node counterpoint @@ -2454,9 +2437,8 @@ has been one of the most popular polyphonic composition methods. >> @end lilypond - @seealso -None yet. +No cross-references. @node countertenor @@ -2471,7 +2453,6 @@ DK: kontratenor, S: kontratenor, counter tenor, FI: kontratenori. - @seealso @ref{contralto}. @@ -2497,7 +2478,6 @@ Increasing volume. Indicated by a rightwards opening horizontal wedge g4\< a b c | d1\! \bar "|." @end lilypond - @seealso @ref{decrescendo}, @ref{hairpin}. @@ -2517,9 +2497,8 @@ FI: vihjenuotit. In a separate part notes belonging to another part with the purpose of hinting when to start playing. Usually printed in a smaller type. - @seealso -None yet. +No cross-references. @node custos @@ -2562,9 +2541,15 @@ they have survived only in special forms of musical notation such as the } @end lilypond +@seealso +No cross-references. + + +@node cut time +@section cut time @seealso -None yet. +@ref{alla breve}. @node D @@ -2581,7 +2566,6 @@ None yet. 
@item FI: D, d @end itemize - @seealso @ref{Pitch names} @@ -2601,9 +2585,8 @@ FI: da capo, alusta. Abbreviated @notation{D.C.}. Indicates that the piece is to be repeated from the beginning to the end or to a certain place marked @emph{fine}. - @seealso -None yet. +No cross-references. @node dal niente @@ -2618,10 +2601,9 @@ DK: ?, S: ?, FI: tyhjästä ilmaantuen. -[Italian: @q{from nothing}.] Used with @notation{crescendo} to indicate +[Italian: @q{from nothing}] Used with @notation{crescendo} to indicate that the sound should gradually increase from nothing. - @seealso @ref{al niente}. @@ -2655,9 +2637,8 @@ b4 a g2-\markup { \bar "|." @end lilypond - @seealso -None yet. +No cross-references. @node decrescendo @@ -2680,7 +2661,6 @@ wedge (hairpin) or the abbreviation @notation{decresc.}. d4\> c b a | g1 \! \bar "|." @end lilypond - @seealso @ref{crescendo}, @ref{diminuendo}, @ref{hairpin}. @@ -2699,9 +2679,8 @@ FI: laskeva intervalli. A distance between a starting higher note and a lower ending note. - @seealso -None yet. +No cross-references. @node diatonic scale @@ -2716,7 +2695,7 @@ DK: diatonisk skala, S: diatonisk skala, FI: diatoninen asteikko. -A scale consisting of 5@w{ }whole tones and 2@w{ }semitones (S). Scales +A scale consisting of 5@w{ }whole tones and 2@w{ }semitones (S). Scales played on the white keys of a piano keybord are diatonic. These scales are sometimes called, somewhat inaccurately, @q{church modes}). @@ -2816,7 +2795,7 @@ the 6th and 7th tone. c1 d e^"~~ S" f g a b^"~~ S" c } - \lyrics { + \lyrics { Major } >> @@ -2828,7 +2807,7 @@ the 6th and 7th tone. a1 b^"~~ S" c d e^"~~ S" f g a } - \lyrics { + \lyrics { "Ancient (or Natural) minor" } >> @@ -2852,7 +2831,7 @@ the 6th and 7th tone. a1 b^"~~ S" c d e fis gis^"~~ S" a } - \lyrics { + \lyrics { "Melodic minor ascending" } >> @@ -2864,13 +2843,12 @@ the 6th and 7th tone. a1 g! 
f!^"~~ S" e d c^"~~ S" b a } - \lyrics { + \lyrics { "Melodic minor descending" } >> @end lilypond - @seealso @ref{semitone}, @ref{whole tone}. @@ -2878,7 +2856,6 @@ the 6th and 7th tone. @node didymic comma @section didymic comma - @seealso @ref{syntonic comma}. @@ -2895,7 +2872,6 @@ DK: formindsket interval, S: förminskat intervall, FI: vähennetty intervalli. - @seealso @ref{interval}. @@ -2912,7 +2888,6 @@ DK: diminuendo, S: diminuendo, FI: diminuendo, hiljentyen. - @seealso @ref{decrescendo}. @@ -2931,7 +2906,6 @@ FI: aika-arvojen tihennys. This is a stub for diminution (@emph{wrt} mensural notation). - @seealso @ref{augmentation}, @ref{mensural notation}. @@ -2948,7 +2922,6 @@ DK: ?, S: ?, FI: suora. - @seealso @ref{custos}. @@ -2971,13 +2944,12 @@ with conjunct movement. @lilypond[quote,relative=1] \key a \major \time 4/4 -\partial 8 e8 | -a4. gis8 b a e cis | -fis2 d4. -\bar "||" + \partial 8 e8 | + a4. gis8 b a e cis | + fis2 d4. + \bar "||" } @end lilypond - @seealso @ref{conjunct movement}. @@ -2987,7 +2959,6 @@ fis2 d4. Another name for @ref{dissonant interval}. - @seealso @ref{dissonant interval}, @ref{harmony}. @@ -3004,7 +2975,6 @@ DK: dissonerende interval, dissonans, S: dissonans, FI: dissonanssi, dissonoiva intervalli, riitasointi. - @seealso @ref{harmony}. @@ -3040,9 +3010,8 @@ section in a long antiphonal or responsorial chant. TODO: musical example here? - @seealso -None yet. +No cross-references. @node doit @@ -3060,7 +3029,6 @@ FI: nousu. Indicator for a indeterminately rising pitch bend. Compare with @emph{glissando}, which has determinate starting and ending pitches. - @seealso @ref{fall}, @ref{glissando}. @@ -3079,7 +3047,6 @@ FI: dominantti, huippusointu. The fifth @emph{scale degree} in @emph{functional harmony}. - @seealso @ref{functional harmony}, @ref{scale degree}. @@ -3096,7 +3063,6 @@ DK: dominantnoneakkord, S: dominantnonackord, FI: dominanttinoonisointu. - @seealso @ref{chord}, @ref{functional harmony}. 
@@ -3113,7 +3079,6 @@ DK: dominantseptimakkord, S: dominantseptimackord, FI: dominanttiseptimisointu. - @seealso @ref{chord}, @ref{functional harmony}. @@ -3130,7 +3095,6 @@ DK: dorisk skala, S: dorisk tonart, FI: doorinen moodi. - @seealso @ref{diatonic scale}. @@ -3147,7 +3111,6 @@ DK: punkt, S: punkt, FI: piste. - @seealso @ref{dotted note}, @ref{note value}. @@ -3164,7 +3127,6 @@ DK: punkteret node, S: punkterad not, FI: pisteellinen nuotti. - @seealso @ref{note value}. @@ -3181,7 +3143,6 @@ DK: dobbelt forslag, S: dubbelslag, FI: kaksoisappogiatura, kaksoisetuhele. - @seealso @ref{appoggiatura}. @@ -3200,9 +3161,8 @@ FI: kaksoistahtiviiva. Indicates the end of a section within a movement. - @seealso -None yet. +No cross-references. @node double dotted note @@ -3217,7 +3177,6 @@ DK: dob@-belt@-punk@-te@-ret node, S: dub@-bel@-punk@-te@-rad not, FI: kaksoispisteellinen nuotti. - @seealso @ref{note value}. @@ -3234,7 +3193,6 @@ DK: dob@-belt-b, S: dubbelbe, FI: kaksoisalennusmerkki. - @seealso @ref{accidental}. @@ -3251,7 +3209,6 @@ DK: dob@-belt@-kryds, S: dubbelkors, FI: kaksoisylennysmerkki. - @seealso @ref{accidental}. @@ -3268,7 +3225,6 @@ DK: ?, S: ?, FI: kaksois-aika-arvomerkintä. - @seealso @ref{polymetric time signature}. @@ -3287,9 +3243,8 @@ FI: kaksoistrilli. A simultaneous trill on two notes, usually in the distance of a third. - @seealso -None yet. +No cross-references. @node duple meter @@ -3304,7 +3259,6 @@ DK: todelt takt, S: tvÃ¥takt, FI: kaksoistempo. - @seealso @ref{meter}. @@ -3321,7 +3275,6 @@ DK: duol, S: duol, FI: duoli. - @seealso @ref{note value}. @@ -3338,7 +3291,6 @@ DK: varighed, S: tonlängd, FI: kesto, aika-arvo. - @seealso @ref{note value}. @@ -3359,7 +3311,6 @@ The aspect of music relating to degrees of loudness, or changes from one degree to another. The terms, abbreviations, and symbols used to indicate this information are called dynamic marks. 
- @seealso @ref{piano}, @ref{forte}, @ref{crescendo}, @ref{decrescendo}, @ref{diminuendo}. @@ -3379,7 +3330,6 @@ indicate this information are called dynamic marks. @item FI: E, e @end itemize - @seealso @ref{Pitch names} @@ -3387,7 +3337,6 @@ indicate this information are called dynamic marks. @node ecclesiastical mode @section ecclesiastical mode - @seealso @ref{church mode}, @ref{diatonic scale}. @@ -3407,7 +3356,6 @@ indicate this information are called dynamic marks. @item FI: kahdeksasosanuotti @end itemize - @seealso @ref{note value}. @@ -3427,7 +3375,6 @@ indicate this information are called dynamic marks. @item FI: kahdeksasosatauko @end itemize - @seealso @ref{note value}. @@ -3453,7 +3400,6 @@ The singing of several syllables on a single note. Elision may be indicated by a lyric tie, which looks like (and serves the same function) as a musical tie. - @seealso @ref{lyric tie}. @@ -3461,7 +3407,6 @@ tie. @node embellishment @section embellishment - @seealso @ref{ornament}. @@ -3485,9 +3430,8 @@ drafting or engineering drawing, using similar tools. The traditional process of music printing is done through cutting in a plate of metal. Now also the term for the art of music typesetting. - @seealso -None yet. +No cross-references. @node enharmonic @@ -3518,9 +3462,8 @@ names but equal pitch. >> @end lilypond - @seealso -None yet. +No cross-references. @node equal temperament @@ -3538,7 +3481,6 @@ FI: tasavireinen. A tuning system that divides the octave into 12 equal semitones (each of which is precisely equal to 100 cents). - @seealso @ref{cent}, @ref{semitone}, @ref{temperament}. @@ -3566,7 +3508,6 @@ Performance indications concerning: @end itemize - @seealso @ref{allegro}, @ref{andante}, @ref{crescendo}, @ref{forte}. @@ -3621,7 +3562,6 @@ by the given number of octaves. @end itemize - @seealso @ref{melisma}, @ref{sul G}, @ref{thorough bass}, @ref{octave mark}, @ref{octave marking}. @@ -3641,7 +3581,6 @@ by the given number of octaves. 
@item FI: F, f @end itemize - @seealso @ref{Pitch names} @@ -3690,7 +3629,6 @@ Double Bass). >> @end lilypond - @seealso @ref{baritone clef}, @ref{strings}. @@ -3710,7 +3648,6 @@ FI: lasku. Indicator for a indeterminately falling pitch bend. Compare with @emph{glissando}, which has determinate starting and ending pitches. - @seealso @ref{doit}, @ref{glissando}. @@ -3732,7 +3669,6 @@ played at an increasing or decreasing tempo -- depending on the direction of @q{feathering} -- but without changing the overall tempo of the piece. - @seealso Internals Reference: @ruser{Manual beams} @@ -3759,9 +3695,8 @@ a4 b c2^\fermata \bar "|." @end lilypond - @seealso -None yet. +No cross-references. @node fifth @@ -3776,7 +3711,6 @@ DK: kvint, S: kvint, FI: kvintti. - @seealso @ref{interval}. @@ -3833,7 +3767,6 @@ played above the bass notes. >> @end lilypond - @seealso @ref{chord}, @ref{interval}. @@ -3853,9 +3786,8 @@ FI: sormitus. Figures to the side or above the note that methodically indicate which fingers to use while playing a passage. - @seealso -None yet. +No cross-references. @node flag @@ -3880,7 +3812,6 @@ g32-"32nd" s8 g64-"64th" s8 @end lilypond - @seealso @ref{note value}. @@ -3910,7 +3841,6 @@ Also: @end itemize - @seealso @ref{articulation}, @ref{harmonics}. @@ -3927,7 +3857,6 @@ DK: b, S: beförtecken, FI: alennusmerkki. - @seealso @ref{accidental}. @@ -3935,7 +3864,6 @@ FI: alennusmerkki. @node forefall @section forefall - @seealso @ref{appoggiatura}. @@ -3952,7 +3880,7 @@ DK: forte, S: forte, FI: forte, voimakkaasti. -[Italian: @q{loud}.] +[Italian: @q{loud}] Abbreviated @notation{@b{f}}. Variants include: @@ -3961,9 +3889,8 @@ Abbreviated @notation{@b{f}}. Variants include: @item @emph{fortissimo}, very loud (notated @notation{@b{ff}}). @end itemize - @seealso -None yet. +No cross-references. @node fourth @@ -3978,7 +3905,6 @@ DK: kvart, S: kvart, FI: kvartti. - @seealso @ref{interval}. 
@@ -4004,7 +3930,6 @@ The specific rules for @q{frenching} a score differ from publisher to publisher. If you are producing scores for eventual publication by a commercial publisher, you may wish to procure a copy of their style manual. - @seealso @ref{Frenched staff}. @@ -4025,7 +3950,6 @@ FI: karsittu nuotinnus. Frenched staff has unneeded measures or sections removed. This is useful for producing, for example, an @emph{ossia} staff. - @seealso @ref{ossia}. @@ -4048,7 +3972,6 @@ DK: fuga, S: fuga, FI: fuuga. - @seealso @ref{counterpoint}. @@ -4088,9 +4011,8 @@ TODO: what does the @q{p} mean in Sp, Dp, Tp? >> @end lilypond - @seealso -None yet. +No cross-references. @node G @@ -4107,7 +4029,6 @@ None yet. @item FI: G, g @end itemize - @seealso @ref{Pitch names} @@ -4133,27 +4054,26 @@ part in modern choral scores). @lilypond[quote,notime] \relative c'' { \override Staff.Clef #'full-size-change = ##t - \set Score.proportionalNotationDuration = #(ly:make-moment 1 8) - \clef french - g1 - \clef treble - g1 - \clef "G^8" - g1 - \clef "G_8" - g1 + \set Score.proportionalNotationDuration = #(ly:make-moment 1 8) + \clef french + g1 + \clef treble + g1 + \clef "G^8" + g1 + \clef "G_8" + g1 } \addlyrics { - "french violin clef" - "violin clef" - "octave up" - "octave down" -} + "french violin clef" + "violin clef" + "octave up" + "octave down" + } @end lilypond - @seealso -None yet. +No cross-references. @node glissando @@ -4170,9 +4090,8 @@ FI: glissando, liukuen. Letting the pitch slide fluently from one note to the other. - @seealso -None yet. +No cross-references. @node grace notes @@ -4190,7 +4109,6 @@ FI: korunuotit. Notes printed in small types to indicate that their time values are not counted in the rhythm of the bar. - @seealso @ref{acciaccatura}, @ref{appoggiatura}, @ref{grace notes}, @ref{ornament}. @@ -4210,7 +4128,6 @@ FI: kaksoisnuottiviivasto. A combination of two staves with a brace. Usually used for piano music. - @seealso @ref{brace}. 
@@ -4229,15 +4146,13 @@ FI: grave, raskaasti. [Italian] Slow, solemn. - @seealso -None yet. +No cross-references. @node gruppetto @section gruppetto - @seealso @ref{turn}. @@ -4260,7 +4175,6 @@ Letter name used for @notation{B natural} in German and Scandinavian usage. In the standard usage of these countries, @notation{B} means @notation{B flat}. - @seealso @ref{Pitch names}, @ref{B}. @@ -4277,7 +4191,6 @@ c2\> c\< c1\! @end lilypond - @seealso @ref{crescendo}, @ref{decrescendo}. @@ -4297,7 +4210,6 @@ c1\! @item FI: puolinuotti. @end itemize - @seealso @ref{note value}. @@ -4317,7 +4229,6 @@ c1\! @item FI: puolitauko. @end itemize - @seealso @ref{note value}. @@ -4386,9 +4297,8 @@ For instruments of the violin family, there are two types of harmonics: natural harmonics, which are those played on the open string; and artificial harmonics, which are produced on stopped strings. - @seealso -None yet. +No cross-references. @node harmony @@ -4409,26 +4319,25 @@ categories @emph{consonances} and @emph{dissonances}. Consonances: @lilypond[quote,notime,relative=2,line-width=13.0\cm] -1_"unison " s -_"third " s -_"fourth " s -_"fifth " s -_"sixth " s -_"octave " s -_"tenth" s s + 1_"unison " s + _"third " s + _"fourth " s + _"fifth " s + _"sixth " s + _"octave " s + _"tenth" s s @end lilypond Dissonances: @lilypond[quote,notime,relative=2,line-width=13.0\cm] -1_"second " s s -_"seventh " s s -_"ninth" s s + 1_"second " s s + _"seventh " s s + _"ninth" s s @end lilypond For harmony that uses three or more notes, see @ref{chord}. - @seealso @ref{chord}. @@ -4475,7 +4384,6 @@ c1. \bar "||" and is therefore a polymeter (second definition) of considerable antiquity. - @seealso @ref{mensural notation}, @ref{meter}, @ref{polymeter}, @ref{proportion}. @@ -4495,10 +4403,10 @@ FI: homofonia, yksiäänisyys. Music in which one voice leads melodically supported by the other voices in the same rhythm (more or less). In contrast to @emph{polyphony}. - @seealso @ref{polyphony}. 
+ @node hymn meter @section hymn meter @@ -4536,9 +4444,8 @@ easier reading, a hymn with a meter of 87.87.87.87 is usually written @item 88.88.88.88 is Double Long Meter (DLM or D.L.M.) @end itemize - @seealso -None yet. +No cross-references. @node interval @@ -4553,70 +4460,64 @@ DK: interval, S: intervall, FI: intervalli, kahden sävelen korkeusero. -Difference in pitch between two notes. Intervals may be perfect, minor, -major, diminished, or augmented. The augmented fourth and the diminished -fifth are identical (or @emph{enharmonic}) and are called @emph{tritonus} -because they consist of three whole tones. The addition of such two -intervals forms an octave. - -@lilypond[quote,notime,line-width=13.0\cm] -<< - \context Voice \relative c'' { - < g g >1 - < g as >^"minor" - < g a! > - < g ais >^"augm" - < gis bes >^"dimin" - < g! bes >^"minor" - < g b! >^"major" - < g bis >^"augm" - } - \context Lyrics \lyrics { - "unison " "second " "second " "second " - "third " "third " "third " "third" - } ->> -@end lilypond - -@lilypond[quote,notime,line-width=13.0\cm] -<< - \context Staff \relative c'' { - 1^"perfect" - ^"aug" - ^"perfect" - ^"dim" - ^"dim" - ^"minor" - ^"major" - ^"aug" - } - \lyrics { - "fourth " "fourth " "fifth " "fifth " - "sixth " "sixth " "sixth " "sixth" - } ->> -@end lilypond +Difference in pitch between two notes. Intervals may be diminished, minor, +perfect, major, or augmented. The augmented fourth and the diminished fifth are +identical (or @emph{enharmonic}) and are called @emph{tritonus} because they +consist of three whole tones. The addition of such two intervals forms an +octave. 
@lilypond[quote,notime,line-width=13.0\cm] << - \context Staff \relative c'' { - 1^"dimin" - ^"minor" - ^"major" - - ^"minor" - ^"major" - ^"minor" - ^"major" - } - \context Lyrics \lyrics { - "seventh " "seventh " "seventh " "octave " - "ninth " "ninth " "tenth " "tenth" - } +\context Voice \relative c'' { +% Prime or unison + < g g >1 + < g gis >^"aug" +% Second + < gis as >^"dim" + < g! as >^"min" + < g a! >^"maj" + < g ais >^"aug" +% Third + < gis bes >^"dim" + < g! bes >^"min" + < g b! >^"maj" + < g bis >^"aug" +% Fourth + < g ces >^"dim" + < g c! >^"per" + < g cis >^"aug" +% Fifth + < g des' >^"dim" + < g d' >^"per" + < g dis >^"aug" +% Sixth + < gis es' >^"dim" + < g! es' >^"min" + < g e'! >^"maj" + < g eis' >^"aug" +% Seventh + < gis f'! >^"dim" + < g! f'! >^"min" + < g fis' >^"maj" + < g fisis' >^"aug" +% Octave + < g ges' >^"dim" + < g g' >^"per" + < g gis' >^"aug" +} +\context Lyrics \lyrics { + "unison " "unison " + "second " "second " "second " "second " + "third " "third " "third " "third " + "fourth " "fourth " "fourth " + "fifth " "fifth " "fifth " + "sixth " "sixth " "sixth " "sixth " + "seventh" "seventh" "seventh" "seventh" + "octave " "octave " "octave " +} >> @end lilypond - @seealso @ref{enharmonic}, @ref{whole tone}. @@ -4659,9 +4560,8 @@ marked as @notation{I6/4} or @notation{Ic}. Second inversion is the most unstable chord position. @end table - @seealso -None yet. +No cross-references. @node inverted interval @@ -4684,9 +4584,8 @@ The difference between an interval and an octave. _"fourth " s s _"fifth " s s \bar "||" @end lilypond - @seealso -None yet. +No cross-references. @node just intonation @@ -4704,7 +4603,6 @@ FI: puhdas viritys. Tuning system in which the notes are obtained by adding and subtracting natural fifths and thirds. - @seealso @ref{temperament}. @@ -4724,7 +4622,6 @@ FI: tonaliteetti. According to the 12@w{ }tones of the @emph{chromatic scale} there are 12@w{ }keys, one on@w{ }c, one on c-sharp, etc. 
- @seealso @ref{chromatic scale}, @ref{key signature}. @@ -4744,7 +4641,6 @@ FI: sävellajiosoitus. The sharps or flats appearing at the beginning of each staff indicating the key of the music. - @seealso @ref{accidental}. @@ -4761,12 +4657,11 @@ DK: ?, S: ?, FI: antaa väristä. -[French: @q{Let vibrate}.] Most frequently associated with harp +[French: @q{Let vibrate}] Most frequently associated with harp parts. Marked @notation{l.v.} in the score. - @seealso -None yet. +No cross-references. @node largo @@ -4801,9 +4696,8 @@ The seventh @emph{scale degree}, a @emph{semitone} below the tonic; so called because of its strong tendency to @q{lead up} (resolve upwards) to the tonic scale degree. - @seealso -@ref{scale degree}, @ref{semitone}. +@ref{scale degree, @ref{semitone}. @node ledger line @@ -4826,9 +4720,8 @@ s1 c''1 @end lilypond - @seealso -None yet. +No cross-references. @node legato @@ -4864,7 +4757,6 @@ notes, unlike (b) @notation{leggiero} or @notation{non-legato}, (c) >> @end lilypond - @seealso @ref{staccato}. @@ -4872,7 +4764,6 @@ notes, unlike (b) @notation{leggiero} or @notation{non-legato}, (c) @node legato curve @section legato curve - @seealso @ref{slur}, @ref{legato}. @@ -4880,7 +4771,6 @@ notes, unlike (b) @notation{leggiero} or @notation{non-legato}, (c) @node leger line @section leger line - @seealso @ref{ledger line}. @@ -4906,7 +4796,6 @@ performance in the sense of articulation. With the invention of the metric system of the white mensural notation, the need for ligatures to denote such patterns disappeared. - @seealso @ref{mensural notation}. @@ -4928,9 +4817,8 @@ A pond with lilies floating in it. Also, the name of a music typesetting program. - @seealso -None yet. +No cross-references. @node line @@ -4945,7 +4833,6 @@ DK: nodelinie, S: notlinje, FI: viiva, nuottiviiva. - @seealso @ref{staff}. @@ -4962,10 +4849,9 @@ DK: ?, S: ?, FI: kirjoitetussa äänenkorkeudessa. -[Italian: @q{place}.] 
Instruction to play the following passage at the +[Italian: @q{place}] Instruction to play the following passage at the written pitch. Cancels octave mark (q.v.). - @seealso @ref{octave mark}, @ref{octave marking}. @@ -4982,7 +4868,6 @@ DK: langt forslag, S: lÃ¥ngt förslag, FI: pitkä appoggiatura, pitkä etuhele. - @seealso @ref{appoggiatura}. @@ -5002,14 +4887,13 @@ FI: pitkä appoggiatura, pitkä etuhele. @item FI: longa. @end itemize -Note value: double length of @notation{breve}. +Note value: twice the length of a @notation{breve}. @lilypond[quote,notime,relative=2] \override NoteHead #'style = #'mensural g\longa g\breve @end lilypond - @seealso @ref{breve}, @ref{note value}. @@ -5028,7 +4912,6 @@ FI: sidonta sanoituksessa. @c TODO: add languages - @seealso @ref{elision}. @@ -5047,9 +4930,8 @@ FI: sanoitus. @c Definition? - @seealso -None yet. +No cross-references. @node major @@ -5064,7 +4946,6 @@ DK: dur, S: dur, FI: duuri. - @seealso @ref{diatonic scale}. @@ -5081,11 +4962,34 @@ DK: stort interval, S: stort intervall, FI: suuri intervalli. - @seealso @ref{interval}. +@node maxima +@section maxima + +ES: ?, +I: ?, +F: ?, +D: ?, +NL: ?, +DK: ?, +S: ?, +FI: ?. + +Note value: twice the length of a @notation{longa}. + +The maxima is the largest duration in use during the 15th and 16th centuries. +Like the longa, the maxima can be either two or three times as long as the +@notation{longa} (called @notation{binary} and @notation{ternary}, +respectively). By the late 15th century, most composers used the smaller +proportion by default. + +@seealso +@ref{Duration names notes and rests}, @ref{longa}, @ref{note values}. + + @node meantone temperament @section meantone temperament @@ -5103,7 +5007,6 @@ fifth by 16@w{ }cents. Due to the non-circular character of this temperament only a limited set of keys are playable. Used for tuning keyboard instruments for performance of pre-1650 music. - @seealso @ref{cent}, @ref{temperament}. 
@@ -5124,7 +5027,6 @@ A group of beats (units of musical time) the first of which bears an accent. Such groups in numbers of two or more recur consistently throughout the composition and are separated from each other by bar lines. - @seealso @ref{bar line}, @ref{beat}, @ref{meter}. @@ -5132,7 +5034,6 @@ composition and are separated from each other by bar lines. @node measure repeat @section measure repeat - @seealso @ref{percent repeat}. @@ -5159,7 +5060,6 @@ mediant (variant tonic). @end itemize - @seealso @ref{chord}, @ref{functional harmony}, @ref{relative key}. @@ -5179,9 +5079,8 @@ FI: melisma, laulettavan tavun sävelkuvio. A melisma (Greek: plural @emph{melismata}) is a group of notes or tones sung on one syllable, especially as applied to liturgical chant. - @seealso -None yet. +No cross-references. @node melisma line @@ -5198,7 +5097,6 @@ DK: ?, S: ?, FI: melismaviiva. - @seealso @ref{extender line}. @@ -5206,7 +5104,6 @@ FI: melismaviiva. @node melodic cadence @section melodic cadence - @seealso @ref{cadenza}. @@ -5253,12 +5150,126 @@ colored) notes in the earlier notation. ... TODO: add to definition (including summary info on proportional notation) - @seealso @ref{augmentation}, @ref{diminution}, @ref{ligature}, @ref{proportion}. @c TODO: more cross-references? +@node mensuration sign +@section mensuration sign + +@c TODO: add languages + +ES: ?, +I: ?, +F: ?, +D: ?, +NL: ?, +DK: ?, +S: ?, +FI: ?. + +The ancestor of the time signature, mensuration signs were used to indicate the +relationship between two sets of note durations—specifically, the ratio of +breves to semibreves (called @notation{tempus}), and of semibreves to minims +(called @notation{prolatio}). + +Each ratio was represented with a single sign, and was either +three-to-one (ternary) or two-to-one (binary), as in modern music notation.
+Unlike modern music notation, the @emph{ternary} ratio was the preferred +one—applied to the @emph{tempus}, it was called @emph{perfect}, and was +represented by a complete circle; applied to the @emph{prolatio}, it was called +@emph{major} and was represented by a dot in the middle of the sign. The binary +ratio applied to the @emph{tempus} was called @emph{imperfect}, and was +represented by an incomplete circle; applied to @emph{prolatio}, it was called +@emph{minor} and was represented by the lack of an internal dot. There are four +possible combinations, which can be represented in modern time signatures with +and without reduction of note values. (These signs are hard-coded in LilyPond +with reduction.) + +@table @dfn +@item perfect @emph{tempus} with major @emph{prolatio} +Indicated by a complete circle with an internal dot. In modern time signatures, +this equals: +@itemize +@item 9/4, with reduction or +@item 9/2, without reduction +@end itemize + +@item perfect @emph{tempus} and minor @emph{prolatio} +Indicated by a complete circle without an internal dot. In modern time +signatures, this equals: +@itemize +@item 3/2, with reduction or +@item 3/1, without reduction +@end itemize + +@item imperfect @emph{tempus} and major @emph{prolatio} +Indicated by an incomplete circle with an internal dot. In modern time +signatures, this equals: +@itemize +@item 6/4, with reduction or +@item 6/2, without reduction +@end itemize + +@item imperfect @emph{tempus} and minor @emph{prolatio} +Indicated by an incomplete circle without an internal dot. In modern time +signatures, this equals: +@itemize +@item 4/4, with reduction or +@item 2/1, without reduction +@end itemize +@end table + +The last mensuration sign @emph{looks} like common-time because it @emph{is}, +with note values reduced from the original semibreve to a modern quarter note. 
+Being doubly imperfect, this sign represented the (theoretically) +least-preferred mensuration, but it was actually used fairly often. + +This system extended to the ratio of longer note values to each other: + +@itemize + + @item maxima to longa, called: + + @itemize + + @item @notation{modus maximorum}, + @item @notation{modus major}, or + @item @notation{maximodus} + + @end itemize + + @item longa to breve, called: + + @itemize + + @item @notation{modus longarum}, + @item @notation{modus minor}, or + @item @notation{modus} + + @end itemize + +@end itemize + +In the absence of any other indication, these modes were assumed to be +binary. The mensuration signs only indicated tempus and prolatio, so +composers needed another way to indicate these longer ratios (called modes). +Around the middle of the 15th century, composers started to use groups of rests at the +beginning of the staff, preceding the mensuration sign. + + +Two mensuration signs have survived to the present day: the C-shaped sign, +which originally designated @notation{tempus imperfectum} and +@notation{prolatio minor} now stands for @notation{common time}; and the +slashed C, which designated the same with @notation{diminution} now stands +for @notation{cut-time} (essentially, it has not lost its original meaning). + +@seealso +@ref{diminution}, @ref{proportion}, @ref{time signature}. +@c TODO: more cross-references? + + @node meter @section meter @@ -5373,7 +5384,6 @@ Simple quintuple meter (B. Marcello, 1686-1739): the source, with sharps in the accompaniment where the voice has flats and @emph{vice versa}.) - Compound duple meter (unknown): @lilypond[quote,line-width=13.0\cm] @@ -5411,7 +5421,6 @@ Compound quadruple meter (P. Yon, 1886-1943): TODO: add information from discussion on lilypond-user related to polymeter. - @seealso @ref{accent}, @ref{hemiola}, @ref{note value}, @ref{time signature} @@ -5436,7 +5445,6 @@ divisions, and patented it as a @q{metronome}.
The inevitable lawsuit that followed acknowledged Winkler as the creator, but by then Mälzel had already sold many of them, and people had taken to calling it a Mälzel Metronome. - @seealso @ref{metronome mark}. @@ -5457,7 +5465,6 @@ Exact tempo indication (in beats per minute). Abbreviated @notation{M.M.} or @notation{MM}, which is short for Mälzels Metronom (or Mälzel's Mark, @emph{anglice}). - @seealso @ref{metronome} @@ -5465,7 +5472,6 @@ Exact tempo indication (in beats per minute). Abbreviated @notation{M.M.} or @node metronomic indication @section metronomic indication - @seealso @ref{metronome mark} @@ -5482,7 +5488,7 @@ DK: ?, S: ?, FI: kohtalaisen, melko. -[Italian: @q{medium}.] +[Italian: @q{medium}] Used to qualify other indications, such as: @@ -5504,9 +5510,8 @@ Used to qualify other indications, such as: @end itemize - @seealso -None yet. +No cross-references. @node mezzo-soprano @@ -5523,7 +5528,6 @@ FI: mezzosopraano. The female voice between soprano and contralto. - @seealso @ref{soprano}, @ref{contralto}. @@ -5544,14 +5548,13 @@ First C below the 440 Hz A. @lilypond[quote,notime,relative=1] \override Staff.Clef #'full-size-change = ##t -\clef bass c1 s -\clef alto c s -\clef treble c s + \clef bass c1 s + \clef alto c s + \clef treble c s @end lilypond - @seealso -None yet. +No cross-references. @node minor @@ -5566,7 +5569,6 @@ DK: mol, S: moll, FI: molli. - @seealso @ref{diatonic scale}. @@ -5583,7 +5585,6 @@ DK: lille interval, S: litet intervall, FI: pieni intervalli. - @seealso @ref{interval}. @@ -5591,7 +5592,6 @@ FI: pieni intervalli. @node mixolydian mode @section mixolydian mode - @seealso @ref{diatonic scale}. @@ -5608,7 +5608,6 @@ DK: skala, S: modus, skala, FI: moodi, kirkkosävelasteikko. - @seealso @ref{church mode}, @ref{diatonic scale}. @@ -5629,9 +5628,8 @@ Moving from one @ref{key} to another. 
For example, the second subject of a @ref{sonata form} movement modulates to the dominant key if the key is major and to the @ref{relative key} if the key is minor. - @seealso -None yet. +No cross-references. @node mordent @@ -5645,7 +5643,6 @@ DK: mordent, S: mordent, FI: mordent, korukuvio. - @seealso @ref{ornament}. @@ -5653,7 +5650,6 @@ FI: mordent, korukuvio. @node motif @section motif - @seealso @ref{motive}. @@ -5691,9 +5687,8 @@ theme or subject. } @end lilypond - @seealso -None yet. +No cross-references. @node movement @@ -5708,13 +5703,11 @@ DK: sats, S: sats, FI: osa. -Greater musical works like @ref{symphony} and @ref{sonata} most often -consist of several -- more or less -- independent pieces called -movements. - +Greater musical works like @ref{symphony} and @ref{sonata} most often consist of +several -- more or less -- independent pieces called movements. @seealso -None yet. +No cross-references. @node multi-measure rest @@ -5745,7 +5738,6 @@ R1*122 a1 @end lilypond - @seealso @ref{longa}, @ref{breve}. @@ -5762,7 +5754,6 @@ DK: op@-løsningstegn, S: Ã¥terställningstecken, FI: palautusmerkki. - @seealso @ref{accidental}. @@ -5770,7 +5761,6 @@ FI: palautusmerkki. @node neighbor tones @section neighbor tones - @seealso @ref{acciaccatura}, @ref{appoggiatura}, @ref{grace notes}, @ref{ornament}. @@ -5788,7 +5778,6 @@ DK: none, S: nona, FI: nooni. - @seealso @ref{interval}. @@ -5796,7 +5785,6 @@ FI: nooni. @node non-legato @section non-legato - @seealso @ref{legato}. @@ -5819,9 +5807,8 @@ which produces the sound. However, a clear distinction between the terms tone and @ref{note} is strongly recommended. Briefly, one sees a note, and hears a tone. - @seealso -None yet. +No cross-references. @node note head @@ -5842,7 +5829,6 @@ hollow or black heads with or without @notation{stems}, @notation{flags}, etc. For percussion instruments (often having no defined pitch) the note head may indicate the instrument. 
- @seealso @ref{clef}, @ref{flag}, @ref{staff}, @ref{stem}. @@ -5850,7 +5836,6 @@ indicate the instrument. @node note names @section note names - @seealso @ref{Pitch names} @@ -5867,11 +5852,17 @@ DK nodeværdi, S: notvärde, FI: nuotin aika-arvo. -Note values (durations) are measured as fractions -- normally half -- of the -next higher note value. The longest duration in current use is the -@emph{breve}, but sometimes (especially music from the Baroque or earlier) the -double-length note value @emph{longa} or the quadruple-length note value -@emph{maxima} are used. +Note values (durations) are measured as fractions—in modern usage, one-half—of +the next higher note value. The longest duration in current use is the +@notation{breve} (equal to two whole notes), but sometimes (especially in music +dating from the Baroque era or earlier) the @notation{longa} (four whole notes) +or @notation{maxima} (eight whole notes) may be found. + +As used in mensural notation, this fraction was more flexible: it could also +be one-third the higher note value. Composers indicated which proportions +to use with various signs—two of which survive to the present day: the +C-shaped sign for @notation{common time}, and the slashed C for +@notation{alla breve} or @notation{cut-time}. @c TODO -- add maxima to this example, in a way that doesn't break it. @@ -5893,8 +5884,10 @@ double-length note value @emph{longa} or the quadruple-length note value r16_"1/16" s16 r32_"1/32" s16 r64_"1/64" s32 } @end lilypond -An augmentation dot after a note multiplies the duration by one and a -half. Another dot adds yet a fourth of the duration. +An augmentation dot after a note increases its duration by half; a second dot +increases it by half of the first addition (that is, by a fourth of the original +duration). More dots can be used to add further halved fractions of the +original note value (1/8, 1/16, etc.), but they are not frequently encountered. 
@lilypond[quote,line-width=13.0\cm] \relative c'' { @@ -5926,15 +5919,13 @@ dotted notes are also frequently used. } @end lilypond - @seealso -None yet. +@ref{common time}. @node octavation @section octavation - @seealso @ref{octave marking}. @@ -5956,7 +5947,6 @@ The interval of an octave, sometimes abbreviated @notation{8ve}. For uses like @notation{all'ottava} or @notation{8va} with an extender line or bracket, or @notation{loco} see octave marking. - @seealso @ref{interval}, @ref{octave marking}. @@ -6008,11 +5998,10 @@ To parallel the list above: In the phrases above, @notation{quindicesima} is sometimes replaced with @notation{quindecima}, which is Latin. -Finally, the music on an entire staff can be marked to be played in a -different octave by putting a small 8 or 15 above or below the clef at the -beginning. This octave mark can be applied to any clef, but it is most -frequently used with the G and F clefs. - +The music on an entire staff can be marked to be played in a different octave by +putting a small 8 or 15 above or below the clef at the beginning. This octave +mark can be applied to any clef, but it is most frequently used with the G and F +clefs. @seealso @ref{F clef}, @ref{G clef}, @ref{loco}, @ref{octave marking}. @@ -6037,7 +6026,6 @@ octave. For a list of the specific marks used, see @ref{octave mark}. - @seealso @ref{interval}, @ref{loco}, @ref{octave}, @ref{octave mark}. @@ -6045,7 +6033,6 @@ For a list of the specific marks used, see @ref{octave mark}. @node octave sign @section octave sign - @seealso @ref{octave mark}. @@ -6104,7 +6091,6 @@ the >> @end lilypond - @seealso @ref{acciaccatura}, @ref{appoggiatura}, @ref{grace notes}. @@ -6125,9 +6111,8 @@ Ossia (otherwise) marks an alternative. It is an added staff or piano score, usually only a few measures long, which presents another version of the music, for example for small hands. - @seealso -None yet. +No cross-references. @node part @@ -6152,7 +6137,6 @@ web. 
@end itemize - @seealso @ref{counterpoint} @@ -6160,7 +6144,6 @@ web. @node pause @section pause - @seealso @ref{fermata}. @@ -6168,7 +6151,6 @@ web. @node pennant @section pennant - @seealso @ref{flag}. @@ -6192,12 +6174,11 @@ pattern to one or more measures. There are other names for this symbol: @lilypond[quote,relative=2,line-width=13.0\cm] \time 4/4 -\repeat percent 4 { c4_"Beat (or slash) repeat" } -\repeat percent 2 { c4 e g b_"Measure repeat" } -\repeat percent 2 { c,2 es | f4 fis g c_"Multi-measure repeat" | } + \repeat percent 4 { c4_"Beat (or slash) repeat" } + \repeat percent 2 { c4 e g b_"Measure repeat" } + \repeat percent 2 { c,2 es | f4 fis g c_"Multi-measure repeat" | } @end lilypond - @seealso @ref{repeat}, @uref{http://www.music.vt.edu/musicdictionary/textr/Repeat.html,University of @@ -6222,9 +6203,8 @@ kettledrums (I: @emph{timpani}, D: @emph{Pauken}), snare drum, bass drum, tambourine, cymbals, chinese gong (tam-tam), triangle, celesta, glockenspiel, and xylophone. - @seealso -None yet. +No cross-references. @node perfect interval @@ -6239,7 +6219,6 @@ DK: rent interval, S: rent intervall, FI: puhdas intervalli. - @seealso @ref{interval}. @@ -6258,7 +6237,6 @@ FI: fraasi, lause. A natural division of the melodic line, comparable to a sentence of speech. - @seealso @ref{caesura}. @@ -6278,7 +6256,6 @@ FI: fraseeraus, jäsentäminen. The clear rendering in musical performance of the @notation{phrases} of the melody. Phrasing may be indicated by a @notation{slur}. - @seealso @ref{phrase}, @ref{slur}. @@ -6298,9 +6275,8 @@ FI, piano, hiljaa. @emph{piano} (@b{p}) soft, @emph{pianissimo} (@b{pp}) very soft, @emph{mezzo piano} (@b{mp}) medium soft. - @seealso -None yet. +No cross-references. @node pickup @@ -6315,7 +6291,6 @@ DK: optakt, S: upptakt, FI: kohotahti. - @seealso @ref{anacrusis}. @@ -6345,7 +6320,6 @@ association of a particular frequency with a particular pitch name, e.g., c' = @end enumerate - @seealso @ref{Pitch names}. 
@@ -6365,9 +6339,8 @@ FI: pizzicato, näppäillen. A technique for stringed instruments, abbr. @emph{pizz}. To play by plucking the strings. - @seealso -None yet. +No cross-references. @node polymeter @@ -6391,7 +6364,6 @@ parts. @end itemize - @seealso @ref{polymetric} (adj.) @@ -6411,7 +6383,6 @@ FI: monia tahtiosoituksia yhtäaikaa tai peräkkäin sisältävä. Characterized by @emph{polymeter}: using two or more metric frameworks simultaneously or in alternation. - @seealso @ref{polymeter} (noun) @@ -6430,7 +6401,6 @@ FI: vaihtelevan tahtiosoitusmerkintä. A time signature that indicates regularly alternating polymetric time. - @seealso @ref{polymetric}. @@ -6450,7 +6420,6 @@ FI: polyfonia, moniäänisyys. Music written in a combination of several simultaneous voices (parts) of a more or less pronounced individuality. - @seealso @ref{counterpoint}. @@ -6464,7 +6433,6 @@ A stroke in which each of several notes is separated slightly within a slur, without changing the bow's direction. It is used for passages of a @notation{cantabile} character. - @seealso @ref{legato}. @@ -6486,9 +6454,8 @@ FI: presto, hyvin nopeasti. Very quick, i.e., quicker than @ref{allegro}; @emph{prestissimo} denotes the highest possible degree of speed. - @seealso -None yet. +No cross-references. @node proportion @@ -6503,7 +6470,7 @@ DK: ?, S: ?, FI: suhde. -[Latin: @emph{proportio}.] Described in great detail by Gaffurius, in +[Latin: @emph{proportio}] Described in great detail by Gaffurius, in @emph{Practica musicae} (published in Milan in 1496). 
In mensural notation, proportion is: @@ -6522,8 +6489,8 @@ The most common proportions are: @itemize @item 2:1 (or simply 2), expressed by a vertical line through the -mensuration sign (the origin of the @q{cut-time} time signature), or by -turning the sign backwards +mensuration sign (the origin of the @notation{alla breve} time signature), +or by turning the sign backwards @item 3:1 (or simply 3) @item 3:2 (@emph{sesquialtera}) @end itemize @@ -6590,7 +6557,6 @@ another question: @c TODO: add an example or two. O => 4/3, and its modern equivalent - @seealso @ref{mensural notation}. @@ -6616,7 +6582,6 @@ on C eventually circles back to C. However, this C is 23.5 @ref{cent}s higher than the C obtained by adding 7 octaves. The difference between those two pitches is the Pythagorean comma. - @seealso @ref{cent}, @ref{temperament}. @@ -6633,7 +6598,6 @@ DK: kvartol, S: kvartol, FI: kvartoli. - @seealso @ref{note value}. @@ -6683,7 +6647,6 @@ indications are sometimes superscripted and sometimes not (e.g. Dm7, Dm^7, and D^m7 are all identical). The last three chords are not commonly used except in jazz. - @seealso @ref{chord}. @@ -6703,7 +6666,6 @@ except in jazz. @item FI: neljäsosanuotti @end itemize - @seealso @ref{note value}. @@ -6723,7 +6685,6 @@ except in jazz. @item FI: neljäsosatauko @end itemize - @seealso @ref{note value}. @@ -6742,7 +6703,6 @@ FI: neljännessävelaskel. An interval equal to half a semitone. - @seealso @ref{interval} @@ -6759,7 +6719,6 @@ DK: kvintol, S: kvintol, FI: kvintoli. - @seealso @ref{note value}. @@ -6778,7 +6737,6 @@ FI: rallerdando, hidastuen. [Italian] A performance indication, abbreviated "rall.". - @seealso @ref{ritardando}. @@ -6809,7 +6767,6 @@ c1_"c minor" d es f g a! b! c \bar "||" @end lilypond - @seealso @ref{key}, @ref{key signature}, @ref{major}, @ref{minor}. @@ -6837,9 +6794,8 @@ FI: toisto. } @end lilypond - @seealso -None yet. +No cross-references. @node rest @@ -6857,7 +6813,6 @@ FI: tauko. 
@c F: 'pause' if you mean a whole rest, 'silence' if you do not want to @c specify the rest's value. - @seealso @ref{note value}. @@ -6890,9 +6845,8 @@ metrical unit (beat). @end itemize - @seealso -None yet. +No cross-references. @node ritardando @@ -6909,9 +6863,8 @@ FI: ritardando, hidastuen, Gradually slackening in speed. Mostly abbreviated to rit.@: or ritard. - @seealso -None yet. +No cross-references. @node ritenuto @@ -6928,9 +6881,8 @@ FI: ritenuto, hidastaen. Immediate reduction of speed. - @seealso -None yet. +No cross-references. @node scale @@ -6945,7 +6897,6 @@ DK: Skala, S: skala, FI: asteikko, sävelasteikko. - @seealso @ref{diatonic scale}. @@ -6978,7 +6929,6 @@ scale as roots of chords. The most important are degrees I = tonic >> @end lilypond - @seealso @ref{functional harmony}. @@ -6995,7 +6945,7 @@ DK: ?, S: ?, FI: epätavallinen viritys. -[Italian: @emph{scordare}, @q{to mistune}.] Unconventional +[Italian: @emph{scordare}, @q{to mistune}] Unconventional tuning of stringed instruments, particularly lutes or violins. Used to: @@ -7019,9 +6969,8 @@ available on open strings Tunings that could be called @var{scordatura} first appeared early in the 16th Century and became commonplace in the 17th. - @seealso -None yet. +No cross-references. @node score @@ -7040,9 +6989,8 @@ A copy of orchestral, choral, or chamber music showing what each instrument is to play, each voice to sing, having each part arranged one underneath the other on different staves @ref{staff}. - @seealso -None yet. +No cross-references. @node second @@ -7057,14 +7005,36 @@ DK: sekund, S: sekund, FI: sekunti. -The @ref{interval} between two neighboring tones of a scale. A -@ref{diatonic scale} consists of alternating @ref{semitone}s and -@ref{whole tone}s, hence the size of a second depends on the scale -degrees in question. +The interval between two neighboring tones of a scale. 
A diatonic scale +consists of alternating semitones and whole tones, hence the size of a +second depends on the scale degrees in question. + +@seealso +@ref{diatonic scale}, @ref{interval}, @ref{semitone}, @ref{whole tone}. + + +@node semibreve +@section semibreve + +@itemize +@item US: whole note, +@item ES: redonda, +@item I: semibreve, +@item F: ronde, +@item D: Ganze, ganze Note, +@item NL: hele noot, +@item DK: helnode, +@item S: helnot, +@item FI: kokonuotti. +@end itemize + +Note value: called @notation{whole note} in the US. +The semibreve is the basis for the @notation{tactus} in mensural notation +(i.e. music written before ca. 1600). @seealso -None yet. +@ref{mensural notation}, @ref{note value}. @node semitone @@ -7080,7 +7050,7 @@ S: halvton, FI: puolisävel. The interval of a minor second. The (usually) smallest interval in European -composed music. The interval between two neighbouring tones on the piano +composed music. The interval between two neighboring tones on the piano keyboard -- including black and white keys -- is a semitone. An octave may be divided into 12@w{ }semitones. @@ -7088,7 +7058,6 @@ be divided into 12@w{ }semitones. g1 gis s a bes s b! c @end lilypond - @seealso @ref{interval}, @ref{chromatic scale}. @@ -7105,7 +7074,6 @@ DK: septim, S: septim, FI: septimi. - @seealso @ref{interval}. @@ -7113,7 +7081,6 @@ FI: septimi. @node sextolet @section sextolet - @seealso @ref{sextuplet}, @ref{note value}. @@ -7130,7 +7097,6 @@ DK: sekstol, S: sextol, FI: sekstoli. - @seealso @ref{note value}. @@ -7138,7 +7104,6 @@ FI: sekstoli. @node shake @section shake - @seealso @ref{trill}. @@ -7155,7 +7120,6 @@ DK: kryds, S: kors@-förtecken, FI: korotusmerkki. - @seealso @ref{accidental}. @@ -7172,11 +7136,10 @@ DK: ?, S: ?, FI: samoin. -[Italian: @q{in the same manner}.] Performance direction: the music thus marked +[Italian: @q{in the same manner}] Performance direction: the music thus marked is to be played in the same manner (i.e. 
with the same articulations, dynamics, etc.) as the music that precedes it. - @seealso TODO: Where else could I refer the reader? @@ -7196,7 +7159,6 @@ FI: kaksijakoinen tahtiosoitus. A meter in which the basic beat is subdivided in two: that is, a meter that does not include triplet subdivision of the beat. - @seealso @ref{compound meter}, @ref{meter}. @@ -7216,7 +7178,6 @@ that does not include triplet subdivision of the beat. @item FI: kuudestoistaosanuotti @end itemize - @seealso @ref{note value}. @@ -7236,7 +7197,6 @@ that does not include triplet subdivision of the beat. @item FI: kuudestoistaosatauko @end itemize - @seealso @ref{note value}. @@ -7253,7 +7213,6 @@ DK: sekst, S: sext, FI: seksti. - @seealso @ref{interval}. @@ -7273,7 +7232,6 @@ FI: seksti. @item FI: kuudeskymmenesneljäsosanuotti @end itemize - @seealso @ref{note value}. @@ -7293,7 +7251,6 @@ FI: seksti. @item FI: kuudeskymmenesneljäsosatauko @end itemize - @seealso @ref{note value}. @@ -7301,7 +7258,6 @@ FI: seksti. @node slash repeat @section slash repeat - @seealso @ref{percent repeat}. @@ -7322,9 +7278,8 @@ A slur above or below a group of notes indicates that they are to be played @ref{legato}, e.g., with one stroke of the violin bow or with one breath in singing. - @seealso -None yet. +No cross-references. @node solmization @@ -7344,7 +7299,6 @@ General term for systems of designating the degrees of the @emph{re}, @emph{mi}, @emph{fa}, @emph{sol}, @emph{la}, @emph{si} (@emph{ti})). - @seealso @ref{scale}, @ref{scale degree}. @@ -7366,9 +7320,8 @@ composition for piano or for some other instrument with piano accompaniment, which consists of three or four independant pieces, called movements. - @seealso -None yet. +No cross-references. @node sonata form @@ -7396,7 +7349,6 @@ of these two. The second theme is in another key, normally in the key of the @notation{dominant} if the @notation{tonic} is @notation{major}, and in the @notation{relative key} if the tonic is @notation{minor}. 
- @seealso @ref{dominant}, @ref{major}, @ref{minor}, @ref{relative key}, @ref{sonata}, @ref{symphony}, @ref{tonic}. @@ -7405,7 +7357,6 @@ of these two. The second theme is in another key, normally in the key of the @node song texts @section song texts - @seealso @ref{lyrics}. @@ -7424,9 +7375,8 @@ FI: sopraano, korkea naisääni. The highest female voice. - @seealso -None yet. +No cross-references. @node staccato @@ -7447,15 +7397,14 @@ below the note head. @lilypond[quote,relative=2] \key d \major \time 4/4 -\partial 8 a8 | -d4-\staccato cis-\staccato b-\staccato cis-\staccato | -d2. -\bar "||" + \partial 8 a8 | + d4-\staccato cis-\staccato b-\staccato cis-\staccato | + d2. + \bar "||" @end lilypond - @seealso -None yet. +No cross-references. @node staff @@ -7476,15 +7425,13 @@ lines upon and between which the musical notes are written, thus indicating (in connection with a @ref{clef}) their pitch. Staves for @ref{percussion} instruments may have fewer lines. - @seealso -None yet. +No cross-references. @node staves @section staves - @seealso @ref{staff}. @@ -7506,13 +7453,12 @@ whole note. @lilypond[quote,notime,relative=2] \set Score.autoBeaming = ##f -g2_"1/2" g' s16 -g,4_"1/4" g' s16 -g,8_"1/8" g' s16 -g,16_"1/16" g' s16 + g2_"1/2" g' s16 + g,4_"1/4" g' s16 + g,8_"1/8" g' s16 + g,16_"1/16" g' s16 @end lilypond - @seealso @ref{beam}. @@ -7529,10 +7475,9 @@ DK: ?, S: ?, FI: kiihdyttäen, nopeuttaen. -[Italian: @q{pressing}.] Pressing, urging, or hastening the time, as to a +[Italian: @q{pressing}] Pressing, urging, or hastening the time, as to a climax. - @seealso @ref{accelerando}. @@ -7553,9 +7498,8 @@ A family of stringed musical instruments played with a bow. Strings commonly used in a symphony orchestra are violin, viola, violoncello, and double bass. - @seealso -None yet. +No cross-references. @node strong beat @@ -7570,7 +7514,6 @@ D: betonet taktslag, S: betonat taktslag, FI: tahdin vahva isku. 
- @seealso @ref{beat}, @ref{accent}, @ref{measure}, @ref{rhythm}. @@ -7589,7 +7532,6 @@ FI: subdominantti, alidominantti. The fourth @notation{scale degree}. - @seealso @ref{functional harmony}, @ref{scale degree}. @@ -7608,7 +7550,6 @@ FI: alikeskisävel. The sixth @notation{scale degree}. - @seealso @ref{functional harmony}, @ref{scale degree}, @ref{superdominant}. @@ -7627,7 +7568,6 @@ FI: subtoonika, alitoonika. The seventh @ref{scale degree}. - @seealso @ref{functional harmony}, @ref{scale degree}. @@ -7647,9 +7587,8 @@ FI: G-kielellä. Indicates that the indicated passage (or note) should be played on the G string. - @seealso -None yet. +No cross-references. @node superdominant @@ -7666,7 +7605,6 @@ FI: ylidominantti. The sixth @ref{scale degree}. - @seealso @ref{functional harmony}, @ref{scale degree}, @ref{submediant}. @@ -7685,7 +7623,6 @@ FI: ylitoonika. The second @ref{scale degree}. - @seealso @ref{functional harmony}, @ref{scale degree}. @@ -7704,7 +7641,6 @@ FI: sinfonia. A symphony may be defined as a @emph{sonata} for orchestra. - @seealso @ref{sonata}. @@ -7737,9 +7673,8 @@ e16 c'8 e,16 c'8 e,16 c' ~ c4 @end lilypond - @seealso -None yet. +No cross-references. @node syntonic comma @@ -7764,7 +7699,6 @@ the sum of two octaves plus a major third. (3:2)^4 - (2:1)^2 + (5:4) This comma is also known as the comma of Didymus, or didymic comma. - @seealso @ref{Pythagorean comma} @@ -7784,7 +7718,6 @@ FI: nuottijärjestelmä. The collection of staves (@notation{staff}), two or more, as used for writing down of keyboard, chamber, choral, or orchestral music. - @seealso @ref{staff}. @@ -7804,7 +7737,6 @@ FI: viritysjärjestelmä. Systems of tuning in which the intervals deviate from the acoustically pure intervals. - @seealso @ref{meantone temperament}, @ref{equal temperament}. @@ -7826,7 +7758,6 @@ slowest to the quickest, as is indicated by tempo marks as @notation{largo}, @notation{adagio}, @notation{andante}, @notation{allegro}, and @notation{presto}. 
- @seealso @ref{adagio}, @ref{allegro}, @ref{andante}, @ref{largo}, @ref{presto}. @@ -7846,7 +7777,6 @@ FI: tenori, korkea miesääni. The highest @q{natural} male voice (apart from @notation{countertenor}). - @seealso @ref{countertenor}. @@ -7863,7 +7793,6 @@ DK: decim, S: decima, FI: desimi. - @seealso @ref{note value}. @@ -7883,9 +7812,8 @@ FI: viiva, tenuto. An indication that a particular note should be held for the whole length, although this can vary depending on the composer and era. - @seealso -None yet. +No cross-references. @node third @@ -7900,7 +7828,6 @@ DK: terts, S: ters, FI: terssi. - @seealso @ref{interval}. @@ -7920,7 +7847,6 @@ FI: terssi. @item FI: kolmaskymmeneskahdesosanuotti @end itemize - @seealso @ref{note value}. @@ -7940,7 +7866,6 @@ FI: terssi. @item FI: kolmaskymmeneskahdesosatauko @end itemize - @seealso @ref{note value}. @@ -7948,7 +7873,6 @@ FI: terssi. @node thorough bass @section thorough bass - @seealso @ref{figured bass}. @@ -7978,7 +7902,6 @@ g2 ~ g4. r8 @node time @section time - @seealso @ref{meter}. @@ -7999,9 +7922,8 @@ The sign placed at the beginning of a composition to indicate its meter. It most often takes the form of a fraction, but a few signs derived from mensural notation and proportions are also employed. - @seealso -@ref{mensural notation}, @ref{meter}. +@ref{mensural notation}, @ref{mensuration sign}, @ref{meter}. @node tone @@ -8021,9 +7943,8 @@ Tone is a primary building material of music. @c Music from the 20th century may be based on atonal sounds. Meh, not so much - @seealso -None yet. +No cross-references. @node tonic @@ -8040,7 +7961,6 @@ FI: toonika. The first @notation{scale degree}. - @seealso @ref{functional harmony}, @ref{scale degree}. @@ -8078,7 +7998,6 @@ Not all transposing instruments include the pitch class in their name: @end itemize - @seealso @ref{concert pitch}. @@ -8118,9 +8037,8 @@ relative pitches. } @end lilypond - @seealso -None yet. +No cross-references. 
@node treble clef @@ -8135,7 +8053,6 @@ DK: diskantnøgle, S: diskantklav, FI: diskanttiavain. - @seealso @ref{G clef}. @@ -8165,12 +8082,11 @@ in the distance of a third (@ref{interval}). @end enumerate @lilypond[quote,notime,relative=1] -e2:32_"a" -f:32 [ e8:16 f:16 g:16 a:16 ] s4 -\repeat tremolo 8 { e32_"b" g } + e2:32_"a" + f:32 [ e8:16 f:16 g:16 a:16 ] s4 + \repeat tremolo 8 { e32_"b" g } @end lilypond - @seealso @ref{strings} @@ -8187,7 +8103,6 @@ DK: treklang, S: treklang, FI: kolmisointu. - @seealso @ref{chord}. @@ -8204,7 +8119,6 @@ DK: trille, S: drill, FI: trilli. - @seealso @ref{ornament}. @@ -8221,7 +8135,6 @@ DK: tredelt takt, S: tretakt, FI: kolmijakoinen. - @seealso @ref{meter}. @@ -8238,7 +8151,6 @@ DK: triol, S: triol, FI: trioli. - @seealso @ref{note value}. @@ -8255,7 +8167,6 @@ DK: tritonus, S: tritonus, FI: tritonus. - @seealso @ref{interval}. @@ -8276,7 +8187,6 @@ A two-pronged piece of steel used to indicate an absolute pitch, usually for @emph{A} above middle C (440 cps/Hz), which is the international tuning standard. Tuning forks for other pitches are available. - @seealso @ref{middle C}. @@ -8288,7 +8198,6 @@ A non-standard subdivision of a beat or part of a beat, usually indicated with a bracket and a number indicating the number of subdivisions. - @seealso @ref{triplet}, @ref{note value}. @@ -8305,7 +8214,6 @@ DK: dobbeltslag, S: dubbelslag, FI: korukuvio. - @seealso @ref{ornament}. @@ -8326,9 +8234,8 @@ Playing of the same notes or the same melody by various instruments (voices) or by the whole orchestra (choir), either at exactly the same pitch or in a different octave. - @seealso -None yet. +No cross-references. @node upbeat @@ -8343,7 +8250,6 @@ DK: optakt, S: upptakt, FI: kohotahti. - @seealso @ref{anacrusis} @@ -8377,9 +8283,8 @@ FI: ääni, lauluääni. @end itemize - @seealso -None yet. +No cross-references. @node volta @@ -8394,13 +8299,12 @@ DK: ?, S: ?, FI: yksi kertauksen maaleista. -[Italian: @q{time} (instance, not duration).] 
An ending, such as a first +[Italian: @q{time} (instance, not duration)] An ending, such as a first or second ending. LilyPond extends this idea to any number, and allows any text (not just a number) -- to serve as the @notation{volta} text. - @seealso -None yet. +No cross-references. @node weak beat @@ -8415,7 +8319,6 @@ DK: ubetonet taktslag, S: obetonat taktslag, FI: tahdin heikko isku. - @seealso @ref{beat}, @ref{measure}, @ref{rhythm}. @@ -8435,7 +8338,6 @@ FI: tahdin heikko isku. @item FI: kokonuotti @end itemize - @seealso @ref{note value}. @@ -8455,7 +8357,6 @@ FI: tahdin heikko isku. @item FI: kokotauko @end itemize - @seealso @ref{note value}. @@ -8476,7 +8377,6 @@ The interval of a major second. The interval between two tones on the piano keyboard with exactly one key between them -- including black and white keys -- is a whole tone. - @seealso @ref{interval}. @@ -8498,9 +8398,8 @@ instruments are actually made from metal. The woodwind instruments commonly used in a symphony orchestra are flute, oboe, clarinet, saxophone, and bassoon. - @seealso -None yet. +No cross-references. @node Duration names notes and rests @@ -8862,7 +8761,6 @@ the lilypond-user discussion list. @end multitable - @seealso @ref{mensural notation} diff --git a/Documentation/user/rhythms.itely b/Documentation/user/rhythms.itely index 21e1c66f95..3b13b80fe8 100644 --- a/Documentation/user/rhythms.itely +++ b/Documentation/user/rhythms.itely @@ -40,19 +40,23 @@ This section discusses rhythms, rests, durations, beaming and bars. @cindex durations, of notes @cindex note durations +@cindex length of notes +@cindex note lengths @funindex \longa +@funindex longa @funindex \breve +@funindex breve @funindex \maxima +@funindex maxima -Durations are designated by numbers and dots. -Durations are entered as their reciprocal values. For example, a -quarter note is entered using a @code{4} (since it is a 1/4 note), -and a half note is entered using a @code{2} (since it is a 1/2 -note). 
For notes longer than a whole you must use the -@code{\longa} (a double breve) and @code{\breve} commands. -Durations as short as 128th notes may be specified. Shorter values -are possible, but only as beamed notes. +Durations are designated by numbers and dots. Durations are entered +as their reciprocal values. For example, a quarter note is entered +using a @code{4} (since it is a 1/4 note), and a half note is entered +using a @code{2} (since it is a 1/2 note). For notes longer than a +whole you must use the @code{\longa} (a double breve) and +@code{\breve} commands. Durations as short as 128th notes may be +specified. Shorter values are possible, but only as beamed notes. @c Two 64th notes are needed to obtain beams @lilypond[quote,verbatim,relative=2] @@ -87,6 +91,8 @@ a a a2 a a4 a a1 a @cindex notes, dotted @cindex dotted notes +@cindex notes, double-dotted +@cindex double-dotted notes @funindex . @@ -110,8 +116,11 @@ duration. For details of this and other settings which control proportional notation, see @ref{Proportional notation}. @funindex \dotsUp +@funindex dotsUp @funindex \dotsDown +@funindex dotsDown @funindex \dotsNeutral +@funindex dotsNeutral Dots are normally moved up to avoid staff lines, except in polyphonic situations. Predefined commands are available to @@ -131,6 +140,7 @@ see @ref{Direction and placement}. Music Glossary: @rglos{breve}, @rglos{longa}, +@rglos{maxima}, @rglos{note value}, @rglos{Duration names notes and rests}. @@ -164,7 +174,9 @@ rests from 128th to maxima (8 x whole) may be printed. 
@cindex tuplets @cindex triplets + @funindex \times +@funindex times Tuplets are made from a music expression by multiplying all the durations with a fraction: @@ -186,8 +198,11 @@ c4 c \times 2/3 { b4 a g } @end lilypond @funindex \tupletUp +@funindex tupletUp @funindex \tupletDown +@funindex tupletDown @funindex \tupletNeutral +@funindex tupletNeutral The automatic placement of the tuplet bracket above or below the notes may be overridden manually with predefined commands, for @@ -272,12 +287,12 @@ placed within tuplet brackets. @cindex durations, scaling You can alter the duration of single notes, rests or chords by a -fraction @code{N/M} by appending @code{*N/M} (or @code{*N} if -@code{M} is 1) to the duration. -This will not affect the appearance of the notes or rests -produced, but the altered duration will be used in calculating the -position within the measure and setting the duration in the MIDI -output. Multiplying factors may be combined such as @code{*L*M/N}. +fraction @code{N/M} by appending @code{*N/M} (or @code{*N} if @code{M} +is 1) to the duration. This will not affect the appearance of the +notes or rests produced, but the altered duration will be used in +calculating the position within the measure and setting the duration +in the MIDI output. Multiplying factors may be combined such as +@code{*L*M/N}. In the following example, the first three notes take up exactly two beats, but no triplet bracket is printed. @@ -299,7 +314,9 @@ a multiplier. This is useful for skipping many measures, e.g., @code{s1*23}. @cindex compressing music + @funindex \scaleDurations +@funindex scaleDurations Longer stretches of music may be compressed by a fraction in the same way, as if every note, chord or rest had the fraction as a @@ -342,6 +359,7 @@ Snippets: @unnumberedsubsubsec Ties @cindex tie + @funindex ~ A tie connects two adjacent note heads of the same pitch. The tie @@ -389,12 +407,14 @@ the chord. 
@end lilypond -@funindex \repeatTie @cindex repeating ties @cindex ties, repeating @cindex volta brackets and ties @cindex ties and volta brackets +@funindex \repeatTie +@funindex repeatTie + When a second alternative of a repeat starts with a tied note, you have to specify the repeated tie as follows: @@ -409,7 +429,9 @@ have to specify the repeated tie as follows: @cindex laissez vibrer @cindex ties, laissez vibrer + @funindex \laissezVibrer +@funindex laissezVibrer @notation{L.v.} ties (@notation{laissez vibrer}) indicate that notes must not be damped at the end. It is used in notation for @@ -421,18 +443,30 @@ be entered as follows: @end lilypond @cindex ties, placement + @funindex \tieUp +@funindex tieUp @funindex \tieDown +@funindex tieDown @funindex \tieNeutral +@funindex tieNeutral The vertical placement of ties may be controlled, see Predefined commands, or for details, see @ref{Direction and placement}. @cindex ties, appearance +@cindex ties, dotted +@cindex ties, dashed +@cindex dashed ties +@cindex dotted ties + @funindex \tieDotted +@funindex tieDotted @funindex \tieDashed +@funindex tieDashed @funindex \tieSolid +@funindex tieSolid Solid, dotted or dashed ties may be specified, see Predefined commands. @@ -484,8 +518,6 @@ well-defined. In these cases, a slur may be preferable. - - @node Writing rests @subsection Writing rests @@ -501,15 +533,19 @@ Rests are entered as part of the music in music expressions. @unnumberedsubsubsec Rests @cindex rest @cindex rest, entering durations -@cindex maxima -@cindex longa -@cindex breve +@cindex maxima rest +@cindex longa rest +@cindex breve rest @funindex \rest +@funindex rest @funindex r @funindex \maxima +@funindex maxima @funindex \longa +@funindex longa @funindex \breve +@funindex breve Rests are entered like notes with the note name @code{r}. Durations longer than a whole rest use the predefined @@ -559,6 +595,11 @@ a4\rest d4\rest @seealso +Music Glossary: +@rglos{breve}, +@rglos{longa}, +@rglos{maxima}. 
+ Notation Reference: @ref{Full measure rests}. @@ -584,8 +625,11 @@ are rests from 128th to maxima (8 x whole). @cindex invisible rest @cindex rest, invisible @cindex spacer note +@cindex spacer rest + @funindex s @funindex \skip +@funindex skip An invisible rest (also called a @q{spacer rest}) can be entered like a note with the note name@tie{}@code{s}: @@ -647,6 +691,8 @@ Internals Reference: @rinternals{SkipMusic} @cindex rest, multi-measure @cindex rest, full-measure @cindex whole rest for a full measure +@cindex rest, whole for a full measure + @funindex R Rests for one or more full measures are entered like notes with @@ -661,10 +707,10 @@ R1*4 b2^"Tutti" b4 a4 @end lilypond -The duration of full-measure rests is identical to the duration notation -used for notes. The duration in a multi-measure rest must always be an -integral number of measure-lengths, so augmentation -dots or fractions must often be used: +The duration of full-measure rests is identical to the duration +notation used for notes. The duration in a multi-measure rest must +always be an integral number of measure-lengths, so augmentation dots +or fractions must often be used: @lilypond[quote,fragment,verbatim] \compressFullBarRests @@ -678,9 +724,8 @@ R1*13/8 | R1*13/8*12 | R4*5*4 | @end lilypond -A full-measure rest is printed as either a whole -or breve rest, centered in the measure, depending on the time -signature. +A full-measure rest is printed as either a whole or breve rest, +centered in the measure, depending on the time signature. @lilypond[quote,verbatim,fragment] \time 4/4 @@ -690,16 +735,19 @@ R1*3/2 | \time 8/4 R1*2 | @end lilypond -@funindex \expandFullBarRests -@funindex \compressFullBarRests + @cindex multi-measure rest, expanding @cindex multi-measure rest, contracting -By default a multi-measure rest is expanded in the printed score -to show all the rest measures explicitly. 
-Alternatively, a mult-measure rest can be shown as a single measure -containing a multi-measure rest symbol, with the number of measures of rest -printed above the measure: +@funindex \expandFullBarRests +@funindex expandFullBarRests +@funindex \compressFullBarRests +@funindex compressFullBarRests + +By default a multi-measure rest is expanded in the printed score to +show all the rest measures explicitly. Alternatively, a multi-measure +rest can be shown as a single measure containing a multi-measure rest +symbol, with the number of measures of rest printed above the measure: @lilypond[quote,fragment,verbatim] % Default behavior @@ -723,6 +771,9 @@ R2.*2 | @cindex fermata on multi-measure rest @cindex multi-measure rest, attaching fermata +@funindex \fermataMarkup +@funindex fermataMarkup + Markups can be added to multi-measure rests. The predefined command @code{\fermataMarkup} is provided for adding fermatas. @@ -801,6 +852,9 @@ Internals Reference: @rinternals{MultiMeasureRestText}. +@cindex fingerings and multi-measure rests +@cindex multi-measure rests and fingerings + @knownissues If an attempt is made to use fingerings (e.g., @@ -835,7 +889,9 @@ Multi-measure rests do not take part in rest collisions. @cindex time signature @cindex meter + @funindex \time +@funindex time The time signature is set as follows: @@ -844,7 +900,7 @@ The time signature is set as follows: \time 3/4 c2. @end lilypond -@cindex Time signature, visibility of +@cindex time signature, visibility of Time signatures are printed at the beginning of a piece and whenever the time signature changes. 
If a change takes place @@ -862,9 +918,13 @@ c c c c c c @end lilypond +@cindex time signature style +@cindex meter style + @funindex \numericTimeSignature +@funindex numericTimeSignature @funindex \defaultTimeSignature -@cindex time signature style +@funindex defaultTimeSignature The time signature symbol that is used in 2/2 and 4/4 time can be changed to a numeric style: @@ -932,7 +992,9 @@ Internals Reference: @cindex pickup measure @cindex measure, change length @cindex measurePosition + @funindex \partial +@funindex partial Partial or pick-up measures, such as an anacrusis or upbeat, are entered using the @code{\partial} command, with the syntax @@ -992,13 +1054,16 @@ odd warnings may occur. @node Unmetered music @unnumberedsubsubsec Unmetered music -@funindex \cadenzaOn -@funindex \cadenzaOff @cindex bar lines, turning off @cindex bar numbering, turning off @cindex cadenza @cindex unmetered music +@funindex \cadenzaOn +@funindex cadenzaOn +@funindex \cadenzaOff +@funindex cadenzaOff + Bar lines and bar numbers are calculated automatically. For unmetered music (some cadenzas, for example), this is not desirable. To turn off automatic calculation of bar lines and bar numbers, @@ -1068,9 +1133,17 @@ to indicate where breaks can occur. @cindex double time signatures @cindex signatures, polymetric +@cindex time signatures, polymetric +@cindex time signatures, double @cindex polymetric signatures @cindex meter, polymetric +@funindex timeSignatureFraction +@funindex \scaleDurations +@funindex scaleDurations +@funindex \times +@funindex times + Polymetric notation is supported, either explicitly or by modifying the visible time signature symbol and scaling the note durations. @@ -1085,6 +1158,9 @@ signature; see @ref{Time signature}. The scaling is done with @code{\times}, but does not create a tuplet bracket; see @ref{Scaling durations}. 
+@cindex beaming in polymetric music +@cindex beaming in polymetric meter + In this example, music with the time signatures of 3/4, 9/8, and 10/8 are used in parallel. In the second staff, shown durations are multiplied by 2/3, as 2/3 * 9/8 = 3/4, and in the third @@ -1202,6 +1278,9 @@ time signatures. @cindex notes, splitting @cindex splitting notes +@funindex Note_heads_engraver +@funindex Completion_heads_engraver + Long notes which overrun bar lines can be converted automatically to tied notes. This is done by replacing the @code{Note_heads_engraver} with the @@ -1252,6 +1331,9 @@ split rests. @node Showing melody rhythms @unnumberedsubsubsec Showing melody rhythms +@cindex melody rhythms, showing +@cindex rhythms, showing melody + Sometimes you might want to show only the rhythm of a melody. This can be done with the rhythmic staff. All pitches of notes on such a staff are squashed, and the staff itself has a single line @@ -1275,6 +1357,16 @@ staff are squashed, and the staff itself has a single line >> @end lilypond +@cindex guitar chord charts +@cindex strumming rhythms, showing +@cindex guitar strumming rhythms, showing + +@funindex Pitch_squash_engraver +@funindex \improvisationOn +@funindex improvisationOn +@funindex \improvisationOff +@funindex improvisationOff + Guitar chord charts often show the strumming rhythms. This can be done with the @code{Pitch_squash_engraver} and @code{\improvisationOn}. @@ -1339,6 +1431,16 @@ By default, beams are inserted automatically: @cindex beams, manual @cindex manual beams +@cindex beams, setting rules for +@cindex beams, custom rules for + +@funindex measureLength +@funindex beatLength +@funindex beatGrouping +@funindex \autoBeamOn +@funindex autoBeamOn +@funindex \autoBeamOff +@funindex autoBeamOff @lilypond[quote,verbatim,relative=2] \time 2/4 c8 c c c @@ -1356,8 +1458,6 @@ properties, @code{measureLength}, @code{beatLength} and @code{beatGrouping}. 
Both the beaming rules and the context properties can be overridden, see @ref{Setting automatic beam behavior}. -@cindex autoBeamOn -@cindex autoBeamOff @warning{If beams are used to indicate melismata in songs, then automatic beaming should be switched off with @code{\autoBeamOff} and the beams @@ -1385,9 +1485,10 @@ c16 c8 @snippets +@cindex line breaks and beams +@cindex beams and line breaks + @funindex breakable -@cindex break, line -@cindex line breaks @lilypondfile[verbatim,lilyquote,ragged-right,texidoc,doctitle] {beams-across-line-breaks.ly} @@ -1421,10 +1522,6 @@ Beams can collide with note heads and accidentals in other voices @node Setting automatic beam behavior @unnumberedsubsubsec Setting automatic beam behavior -@funindex autoBeaming -@funindex autoBeamSettings -@funindex (end * * * *) -@funindex (begin * * * *) @cindex automatic beams, tuning @cindex tuning automatic beaming @@ -1432,6 +1529,17 @@ Beams can collide with note heads and accidentals in other voices @cindex autobeam @cindex lyrics and beaming +@funindex autoBeaming +@funindex autoBeamSettings +@funindex (end * * * *) +@funindex (begin * * * *) +@funindex measureLength +@funindex beatLength +@funindex beatGrouping +@funindex \time +@funindex time +@funindex \set +@funindex set The placement of automatic beams is determined by the rules described in @ref{Automatic beams}. There are two mutually @@ -1696,15 +1804,20 @@ a a a a @lilypondfile[verbatim,lilyquote,ragged-right,texidoc,doctitle] {beam-endings-in-score-context.ly} +@funindex \autoBeamOff +@funindex autoBeamOff +@funindex \autoBeamOn +@funindex autoBeamOn @predefined -@funindex \autoBeamOff @code{\autoBeamOff}, -@funindex \autoBeamOn @code{\autoBeamOn}. 
@endpredefined +@cindex beam, last in score +@cindex beam, last in polyphonic voice + @knownissues If a score ends while an automatic beam has not been ended and is @@ -1723,6 +1836,8 @@ Snippets: @unnumberedsubsubsec Manual beams @cindex beams, manual +@cindex manual beams + @funindex ] @funindex [ @@ -1740,6 +1855,9 @@ marking the begin and end point with @code{[} and @code{]} @end lilypond +@funindex \noBeam +@funindex noBeam + Individual notes may be marked with @code{\noBeam} to prevent them from being beamed: @@ -1779,7 +1897,10 @@ g a] @unnumberedsubsubsec Feathered beams @cindex beams, feathered + @funindex \featherDurations +@funindex featherDurations +@funindex grow-direction Feathered beams are used to indicate that a small group of notes should be played at an increasing (or decreasing) tempo, without @@ -1795,11 +1916,10 @@ music expression delimited by braces and preceded by a @code{featheredDurations} command which specifies the ratio between the durations of the first and last notes in the group. -The square brackets -show the extent of the beam and the braces show -which notes are to have their durations modified. Normally -these would delimit the same group of notes, but this is not -required: the two commands are independent. +The square brackets show the extent of the beam and the braces show +which notes are to have their durations modified. Normally these +would delimit the same group of notes, but this is not required: the +two commands are independent. In the following example the eight 16th notes occupy exactly the same time as a half note, but the first note is one half as long @@ -1849,10 +1969,12 @@ Snippets: @unnumberedsubsubsec Bar lines @cindex bar lines -@funindex \bar @cindex measure lines @cindex repeat bars +@funindex \bar +@funindex bar + Bar lines delimit measures, and are also used to indicate repeats. 
Normally, simple bar lines are automatically inserted into the printed output at places based on the current time @@ -1896,13 +2018,18 @@ force) a line break to occur at this point. The bar number counter is not increased. To force a line break see @ref{Line breaking}. +@cindex manual bar lines +@cindex manual measure lines +@cindex bar lines, manual +@cindex measure lines, manual + This and other special bar lines may be inserted manually at any -point. When they coincide with the end of a measure they replace -the simple bar line which would have been inserted there -automatically. When they do not coincide -with the end of a measure the specified bar line is inserted at that -point in the printed output. Such insertions do not affect -the calculation and placement of subsequent automatic bar lines. +point. When they coincide with the end of a measure they replace the +simple bar line which would have been inserted there automatically. +When they do not coincide with the end of a measure the specified bar +line is inserted at that point in the printed output. Such insertions +do not affect the calculation and placement of subsequent automatic +bar lines. The simple bar line and five types of double bar line are available for manual insertion: @@ -1971,6 +2098,9 @@ connected between different staves of a @code{StaffGroup}, @funindex whichBar @funindex defaultBarType +@funindex \bar +@funindex bar +@funindex bartype The command @code{\bar }@var{bartype} is a shortcut for @code{\set Timing.whichBar = }@var{bartype}. A bar line is @@ -2002,6 +2132,9 @@ properties). 
@cindex bar numbers @cindex measure numbers +@cindex numbers, bar +@cindex numbers, measure + @funindex currentBarNumber Bar numbers are typeset by default at the start of every line except @@ -2019,9 +2152,10 @@ c1 c c c @snippets -@funindex barNumberVisibility @cindex bar numbers, regular spacing +@funindex barNumberVisibility + Bar numbers can be typeset at regular intervals instead of just at the beginning of every line. To do this the default behavior must be overridden to permit bar numbers to be printed at places @@ -2136,6 +2270,9 @@ Snippets: Internals Reference: @rinternals{BarNumber}. +@cindex bar number collision +@cindex collision, bar number + @knownissues Bar numbers may collide with the top of the @@ -2162,17 +2299,19 @@ c1 c c c @unnumberedsubsubsec Bar and bar number checks @cindex bar check +@cindex bar number check +@cindex measure check +@cindex measure number check + @funindex barCheckSynchronize @funindex | -Bar checks help detect errors in the entered durations. -A bar check may be entered using the bar symbol, @code{|}, -at any place where a bar line is expected to fall. -If bar check lines are encountered at other places, -a list of warnings is printed in the log file, -showing the line numbers and lines -in which the bar checks failed. In the next -example, the second bar check will signal an error. +Bar checks help detect errors in the entered durations. A bar check +may be entered using the bar symbol, @code{|}, at any place where a +bar line is expected to fall. If bar check lines are encountered at +other places, a list of warnings is printed in the log file, showing +the line numbers and lines in which the bar checks failed. In the +next example, the second bar check will signal an error. 
@example \time 3/4 c2 e4 | g2 | @@ -2218,6 +2357,9 @@ pipeSymbol = \bar "||" } @end lilypond +@funindex \barNumberCheck +@funindex barNumberCheck + When copying large pieces of music, it can be helpful to check that the LilyPond bar number corresponds to the original that you are entering from. This can be checked with @@ -2242,7 +2384,9 @@ Snippets: @cindex rehearsal marks @cindex mark, rehearsal + @funindex \mark +@funindex mark To print a rehearsal mark, use the @code{\mark} command @@ -2273,6 +2417,9 @@ mark manually. The value to use is stored in the property @cindex format, rehearsal mark @cindex mark, rehearsal, style @cindex mark, rehearsal, format +@cindex rehearsal mark, manual +@cindex mark, rehearsal, manual +@cindex custom rehearsal mark The style is defined by the property @code{markFormatter}. It is a function taking the current mark (an integer) and the current @@ -2322,6 +2469,12 @@ string. @cindex segno @cindex coda @cindex D.S al Fine +@cindex fermata +@cindex music glyphs +@cindex glyphs, music + +@funindex \musicglyph +@funindex musicglyph Music glyphs (such as the segno sign) may be printed inside a @code{\mark} @@ -2371,12 +2524,14 @@ Internals Reference: @rinternals{RehearsalMark}. @node Grace notes @unnumberedsubsubsec Grace notes -@funindex \grace @cindex ornaments @cindex grace notes @cindex appoggiatura @cindex acciaccatura +@funindex \grace +@funindex grace + Grace notes are ornaments that are written out. Grace notes are printed in a smaller font and take up no logical time in a measure. @@ -2408,10 +2563,11 @@ notes for every eighth grace note \new Staff { c2 \grace { g8[ b] } c2 } >> @end lilypond -@funindex \afterGrace - @cindex grace notes, following +@funindex \afterGrace +@funindex afterGrace + If you want to end a note with a grace, use the @code{\afterGrace} command. It takes two arguments: the main note, and the grace notes following the main note. 
diff --git a/GNUmakefile.in b/GNUmakefile.in index ac2329cf4f..f6389fbe3f 100644 --- a/GNUmakefile.in +++ b/GNUmakefile.in @@ -2,7 +2,7 @@ depth = . -SUBDIRS = buildscripts python scripts \ +SUBDIRS = python scripts \ flower lily \ mf ly \ tex ps scm \ @@ -87,11 +87,11 @@ final-install: WEB_TARGETS = offline WWW-post: -# need UTF8 setting in case this is hosted on a website. +# need UTF8 setting in case this is hosted on a website. echo -e 'AddDefaultCharset utf-8\nAddCharset utf-8 .html\nAddCharset utf-8 .en\nAddCharset utf-8 .nl\nAddCharset utf-8 .txt\n' > $(top-build-dir)/.htaccess - $(PYTHON) $(buildscript-dir)/mutopia-index.py -o $(outdir)/examples.html input/ + $(buildscript-dir)/mutopia-index -o $(outdir)/examples.html input/ find $(outdir) -name '*-root' | xargs rm -rf - $(PYTHON) $(buildscript-dir)/www_post.py $(PACKAGE_NAME) $(TOPLEVEL_VERSION) $(outdir) "$(WEB_TARGETS)" + $(buildscript-dir)/www_post $(PACKAGE_NAME) $(TOPLEVEL_VERSION) $(outdir) "$(WEB_TARGETS)" find $(outdir)/offline-root -type l -delete @@ -200,7 +200,8 @@ $(config_h): config.hh.in @false grand-replace: - PATH=$(buildscript-dir)/$(outbase):$(PATH) $(BASH) $(buildscript-dir)/grand-replace.sh + $(MAKE) -C scripts/build + PATH=$(buildscript-dir):$(PATH) $(buildscript-dir)/grand-replace ################################################################ # testing @@ -229,7 +230,7 @@ test-baseline: local-check: test rm -rf $(RESULT_DIR) mkdir -p $(RESULT_DIR) - $(PYTHON) $(buildscript-dir)/output-distance.py --create-images --output-dir $(RESULT_DIR) input/regression/out-test-baseline input/regression/out-test/ + $(buildscript-dir)/output-distance --create-images --output-dir $(RESULT_DIR) input/regression/out-test-baseline input/regression/out-test/ @find input ly -name '*.ly' -print |grep -v 'out.*/' | xargs grep '\\version' -L | grep -v "standard input" |sed 's/^/**** Missing version: /g' diff --git a/ROADMAP b/ROADMAP index 4ec2eec4bd..d5be4a0077 100644 --- a/ROADMAP +++ b/ROADMAP @@ -20,7 
+20,6 @@ source files. user/ User manuals po/ Translated manual node names fr/ es/ de/ Docs translated to French, Spanish, German, resp. - buildscripts/ Scripts for the build process elisp/ Emacs LilyPond mode and syntax coloring flower/ A simple C++ library input/ Music input examples @@ -41,8 +40,13 @@ source files. po/ Translations for binaries and end-user scripts ps/ PostScript library files python/ Python modules, MIDI module + aux/ Python modules used by maintenance scripts + or in the build process scm/ Scheme sources for LilyPond and subroutine files scripts/ End-user scripts + aux/ Scripts for maintaining the sources and scripts + for the build process that need not be built + build/ Scripts for the build process that must be built stepmake/ Generic make subroutine files tex/ TeX and texinfo library files vim/ Vi(M) LilyPond mode and syntax coloring diff --git a/SConstruct b/SConstruct deleted file mode 100644 index 6ec14e2274..0000000000 --- a/SConstruct +++ /dev/null @@ -1,967 +0,0 @@ -# -*-python-*- - -''' -Experimental scons (www.scons.org) building. 
- -Usage - - scons TARGET - -build from source directory ./TARGET (not recursive) - -Configure, build - - scons [config] # configure - scons # build all - -Run from build tree - - run=$(pwd)/out-scons/usr - export LOCALE=$run/share/locale - export TEXMF='{'$run/share/lilypond,$(kpsexpand '$TEXMF')'}' - PATH=$run/bin:$PATH - - #optionally, if you do not use custom.py below - #export LILYPOND_DATADIR=$run/share/lilypond/ - - lilypond input/simple - -Other targets - scons mf-essential # build minimal mf stuff - - scons doc # build web doc - scons config # reconfigure - scons install # install - scons -c # clean - scons -h # help - - scons / # build *everything* (including installation) - -Options (see scons -h) - scons build=DIR # clean srcdir build, output below DIR - scons out=DIR # write output for alterative config to DIR - -Debugging - scons --debug=dtree - scons --debug=explain - scons verbose=1 - -Optional custom.py - -import os -out='out-scons' -optimising=0 -debugging=1 -gui=1 -os.path.join (os.getcwd (), '=install') -prefix=os.path.join (os.environ['HOME'], 'usr', 'pkg', 'lilypond') - -''' - - -# TODO: - -# * reality check: -# - too many stages in Environments setup -# (see also buildscripts/builders.py) -# - Home-brew scons.cach configuration caching -# - Home-brew source tarball generating -- [why] isn't that in SCons? - -# * usability and documentation for "./configure; make" users - -# * too much cruft in toplevel SConstruct - -# * (optional) operation without CVS directories, from tarball - -# * more program configure tests, actually use full executable name - -# * install doc - -# * split doc target: doc input examples mutopia? - -# * grep FIXME $(find . -name 'S*t') - -# * drop "fast" - -import re -import glob -import os -import string -import sys -import stat -import shutil - -# duh, we need 0.95.1 -EnsureSConsVersion (0, 96, 92) - -usage = r'''Usage: -[ENVVAR=VALUE]... scons [OPTION=VALUE]... [TARGET|DIR]... 
- -TARGETS: clean, config, doc, dist, install, mf-essential, po-update, - realclean, release, sconsclean, tar, TAGS - -ENVVARS: BASH, CCFLAGS, CC, CXX, LIBS, PYTHON, SH... - (see SConstruct:config_vars) - -OPTIONS: -''' - - -config_cache = 'scons.cache' -if os.path.exists (config_cache) and 'config' in COMMAND_LINE_TARGETS: - os.unlink (config_cache) - -# All config_vars can be set as ENVVAR, eg: -# -# CXX=g++-4.0 GS=~/usr/pkg/gs/bin/gs scons config -# -# append test_program variables automagically? -config_vars = [ - 'BASH', - 'BYTEORDER', - 'CC', - 'CCFLAGS', - 'CPPPATH', - 'CPPDEFINES', - 'CXX', - 'CXXFLAGS', - 'DEFINES', - 'DVIPS', - 'FONTFORGE', - 'GCC', - 'GXX', - 'GS', - 'LIBS', - 'LINKFLAGS', - 'MF', - 'PERL', - 'PYTHON', - 'SH', - ] - -# Put your favourite stuff in custom.py -opts = Options ([config_cache, 'custom.py'], ARGUMENTS) -opts.Add ('prefix', 'Install prefix', '/usr/') -opts.Add ('out', 'Output directory', 'out-scons') -opts.Add ('build', 'Build directory', '.') -opts.Add ('DESTDIR', 'DESTDIR prepended to prefix', '') -opts.AddOptions ( - BoolOption ('warnings', 'compile with -Wall and similiar', - 1), - BoolOption ('debugging', 'compile with debugging symbols', - 0), - BoolOption ('optimising', 'compile with optimising', - 1), - BoolOption ('shared', 'build shared libraries', - 0), - BoolOption ('static', 'build static libraries', - 1), - BoolOption ('gui', 'build with GNOME backend (EXPERIMENTAL)', - 0), - BoolOption ('verbose', 'run commands with verbose flag', - 0), - BoolOption ('checksums', 'use checksums instead of timestamps', - 0), - BoolOption ('fast', 'use timestamps, implicit cache, prune CPPPATH', - 0), - ) - -srcdir = Dir ('.').srcnode ().abspath -#ugh -sys.path.append (os.path.join (srcdir, 'stepmake', 'bin')) - -try: - import packagepython - packagepython.Package (srcdir) - packagepython.version_tuple_to_str (package.version) -except: - print '*** FIXME: no packagepython. 
setting version to 1.0' - class Package: - name = 'lilypond' - release_dir = '.' - package = Package - version = '1.0' - -ENV = { 'PYTHONPATH': '' } -for key in ['GUILE_LOAD_PATH', 'LD_LIBRARY_PATH', 'PATH', 'PKG_CONFIG_PATH', - 'PYTHONPATH', 'TEXMF']: - if os.environ.has_key (key): - ENV[key] = os.environ[key] - -ENV['PYTHONPATH'] = os.path.join (srcdir, 'python') + ':' + ENV['PYTHONPATH'] - -env = Environment ( - ENV = ENV, - BYTEORDER = sys.byteorder.upper (), - CC = '$GCC', - CXX = '$GXX', - CPPDEFINES = '-DHAVE_CONFIG_H', - MAKEINFO = 'LANG= makeinfo', - MF_TO_TABLE_PY = srcdir + '/buildscripts/mf-to-table.py', - - PKG_CONFIG_PATH = [os.path.join (os.environ['HOME'], - 'usr/pkg/gnome/lib'), - os.path.join (os.environ['HOME'], - 'usr/pkg/pango/lib')], - GZIP='-9v', - MFMODE = 'ljfour', - TOPLEVEL_VERSION = version, - ) - -Help (usage + opts.GenerateHelpText (env)) - -# Add all config_vars to opts, so that they will be read and saved -# together with the other configure options. -map (lambda x: opts.AddOptions ((x,)), config_vars) -opts.Update (env) - -for key in config_vars: - if os.environ.has_key (key): - env[key] = os.environ[key] - -if env['fast']: - # Usability switch (Anthony Roach). - # See http://www.scons.org/cgi-bin/wiki/GoFastButton - # First do: scons realclean . - env['checksums'] = 0 - SetOption ('max_drift', 1) - SetOption ('implicit_cache', 1) -elif env['checksums']: - # Always use checksums (makes more sense than timestamps). - SetOption ('max_drift', 0) - # Using *content* checksums prevents rebuilds after - # [re]configure if config.hh has not changed. Too bad that it - # is unusably slow. 
- TargetSignatures ('content') - -absbuild = Dir (env['build']).abspath -outdir = os.path.join (Dir (env['build']).abspath, env['out']) -run_prefix = os.path.join (absbuild, os.path.join (env['out'], 'usr')) - - -config_hh = os.path.join (outdir, 'config.hh') -version_hh = os.path.join (outdir, 'version.hh') - -env.Alias ('config', config_cache) - -cachedir = os.path.join (outdir, 'build-cache') - -if not os.path.exists (cachedir): - os.makedirs (cachedir) - -CacheDir (cachedir) - -# No need to set $LILYPOND_DATADIR to run lily, but cannot install... -if env['debugging'] and not 'install' in COMMAND_LINE_TARGETS: - env['prefix'] = run_prefix - -prefix = env['prefix'] -bindir = os.path.join (prefix, 'bin') -sharedir = os.path.join (prefix, 'share') -libdir = os.path.join (prefix, 'lib') -libdir_package = os.path.join (libdir, package.name) -libdir_package_version = os.path.join (libdir_package, version) -localedir = os.path.join (sharedir, 'locale') -sharedir_doc_package = os.path.join (sharedir, 'doc', package.name) -sharedir_package = os.path.join (sharedir, package.name) -sharedir_package_version = os.path.join (sharedir_package, version) -lilypondprefix = sharedir_package_version - -# junkme -env.Append ( - absbuild = absbuild, - srcdir = srcdir, - ) - - - -def symlink_tree (target, source, env): - def mkdirs (dir): - def mkdir (dir): - if not dir: - os.chdir (os.sep) - return - if not os.path.isdir (dir): - if os.path.exists (dir): - os.unlink (dir) - os.mkdir (dir) - os.chdir (dir) - map (mkdir, string.split (dir, os.sep)) - def symlink (src, dst): - os.chdir (absbuild) - dir = os.path.dirname (dst) - mkdirs (dir) - if src[0] == '#': - frm = os.path.join (srcdir, src[1:]) - else: - depth = len (string.split (dir, '/')) - if src.find ('@') > -1: - frm = os.path.join ('../' * depth, - string.replace (src, '@', - env['out'])) - else: - frm = os.path.join ('../' * depth, src, - env['out']) - if src[-1] == '/': - frm = os.path.join (frm, os.path.basename (dst)) - 
if env['verbose']: - print 'ln -s %s -> %s' % (frm, os.path.basename (dst)) - os.symlink (frm, os.path.basename (dst)) - shutil.rmtree (run_prefix) - prefix = os.path.join (env['out'], 'usr') - map (lambda x: symlink (x[0], os.path.join (prefix, - x[1] % {'ver' : version})), - # ^# := source dir - # @ := out - # /$ := add dst file_name - (('python', 'lib/lilypond/python'), - # ugh - ('python', 'share/lilypond/%(ver)s/python'), - ('lily/', 'bin/lilypond'), - ('scripts/', 'bin/convert-ly'), - ('scripts/', 'bin/lilypond-book'), - ('scripts/', 'bin/ps2png'), - ('mf', 'share/lilypond/%(ver)s/dvips/mf-out'), - ('#ps/music-drawing-routines.ps', - 'share/lilypond/%(ver)s/tex/music-drawing-routines.ps'), - ('mf', 'share/lilypond/%(ver)s/otf'), - ('mf', 'share/lilypond/%(ver)s/tfm'), - ('tex', 'share/lilypond/%(ver)s/tex/enc'), - ('#mf', 'share/lilypond/%(ver)s/fonts/mf'), - ('mf', 'share/lilypond/%(ver)s/fonts/map'), - ('mf', 'share/lilypond/%(ver)s/fonts/otf'), - ('mf', 'share/lilypond/%(ver)s/fonts/tfm'), - ('mf', 'share/lilypond/%(ver)s/fonts/type1'), - ('#tex', 'share/lilypond/%(ver)s/tex/source'), - ('tex', 'share/lilypond/%(ver)s/tex/tex-out'), - ('mf', 'share/lilypond/%(ver)s/tex/mf-out'), - ('#ly', 'share/lilypond/%(ver)s/ly'), - ('#scm', 'share/lilypond/%(ver)s/scm'), - ('#scripts', 'share/lilypond/%(ver)s/scripts'), - ('#ps', 'share/lilypond/%(ver)s/ps'), - ('po/@/nl.mo', 'share/locale/nl/LC_MESSAGES/lilypond.mo'), - ('elisp', 'share/lilypond/%(ver)s/elisp'))) - - print "FIXME: BARF BARF BARF" - os.chdir (absbuild) - out = env['out'] - ver = version - prefix = os.path.join (env['out'], 'usr/share/lilypond/%(ver)s/fonts' - % vars ()) - for ext in ('enc', 'map', 'otf', 'svg', 'tfm', 'pfa'): - dir = os.path.join (absbuild, prefix, ext) - os.system ('rm -f ' + dir) - mkdirs (dir) - os.chdir (dir) - os.system ('ln -s ../../../../../../../mf/%(out)s/*.%(ext)s .' 
- % vars ()) - os.chdir (srcdir) - -def configure (target, source, env): - dre = re.compile ('\n(200[0-9]{5})') - vre = re.compile ('.*?\n[^-.0-9]*([0-9][0-9]*\.[0-9]([.0-9]*[0-9])*)', - re.DOTALL) - def get_version (program): - command = '(pkg-config --modversion %(program)s || %(program)s --version || %(program)s -V) 2>&1' % vars () - pipe = os.popen (command) - output = pipe.read () - if pipe.close (): - return None - splits = re.sub ('^|\s', '\n', output) - date_hack = re.sub (dre, '\n0.0.\\1', splits) - m = re.match (vre, date_hack) - v = m.group (1) - if v[-1] == '\n': - v = v[:-1] - return string.split (v, '.') - - def test_version (lst, full_name, minimal, description, package): - program = os.path.basename (full_name) - sys.stdout.write ('Checking %s version... ' % program) - actual = get_version (program) - if not actual: - print 'not found' - lst.append ((description, package, minimal, program, - 'not installed')) - return 0 - print string.join (actual, '.') - if map (string.atoi, actual) \ - < map (string.atoi, string.split (minimal, '.')): - lst.append ((description, package, minimal, program, - string.join (actual, '.'))) - return 0 - return 1 - - def test_program (lst, program, minimal, description, package): - key = program.upper () - if key.find ('+-'): - key = re.sub ('\+', 'X', key) - key = re.sub ('-', '_', key) - sys.stdout.write ('Checking for %s ... ' % program) - if env.has_key (key): - f = env[key] - sys.stdout.write ('(cached) ') - else: - f = WhereIs (program) - env[key] = f - if not f: - print 'not found' - lst.append ((description, package, minimal, program, - 'not installed')) - return 0 - print f - return test_version (lst, program, minimal, description, package) - - def test_lib (lst, program, minimal, description, package): - # FIXME: test for Debian or RPM (or -foo?) based dists - # to guess (or get correct!: apt-cache search?) - # package name. 
- #if os.system ('pkg-config --atleast-version=0 freetype2'): - # barf - if test_version (lst, program, minimal, description, - 'lib%(package)s-dev or %(package)s-devel' - % vars ()): - env.ParseConfig ('pkg-config --cflags --libs %(program)s' - % vars ()) - return 1 - return 0 - - required = [] - test_program (required, 'bash', '2.0', 'Bash', 'bash') - test_program (required, 'gcc', '4.0', 'GNU C compiler', 'gcc') - test_program (required, 'g++', '4.0.5', 'GNU C++ compiler', 'g++') - test_program (required, 'guile-config', '1.8', 'GUILE development', - 'libguile-dev or guile-devel') - test_program (required, 'mf', '0.0', 'Metafont', 'tetex-bin') - test_program (required, 'python', '2.1', 'Python (www.python.org)', - 'python') - # Silly, and breaks with /bin/sh == dash - #test_program (required, 'sh', '0.0', 'Bourne shell', 'sh') - - optional = [] - # Do not use bison 1.50 and 1.75. - #test_program (optional, 'foo', '2.0', 'Foomatic tester', 'bar') - test_program (optional, 'bison', '1.25', 'Bison -- parser generator', - 'bison') - test_program (optional, 'fontforge', '0.0.20050624', 'FontForge', - 'fontforge') - test_program (optional, 'flex', '0.0', 'Flex -- lexer generator', - 'flex') - test_program (optional, 'guile', '1.8', 'GUILE scheme', 'guile') - test_program (optional, 'gs', '8.15', - 'Ghostscript PostScript interpreter', - 'gs or gs-afpl or gs-esp or gs-gpl') - test_program (optional, 'makeinfo', '4.8', 'Makeinfo tool', 'texinfo') - test_program (optional, 'perl', '4.0', - 'Perl practical efficient readonly language', 'perl') - - def CheckYYCurrentBuffer (context): - context.Message ('Checking for yy_current_buffer... 
') - ret = conf.TryCompile ("""using namespace std; - #include - class yy_flex_lexer: public yyFlexLexer - { - public: - yy_flex_lexer () - { - yy_current_buffer = 0; - } - };""", '.cc') - context.Result (ret) - return ret - - conf = Configure (env, custom_tests = { 'CheckYYCurrentBuffer' - : CheckYYCurrentBuffer }) - - defines = { - 'DIRSEP' : "'%s'" % os.sep, - 'PATHSEP' : "'%s'" % os.pathsep, - 'PACKAGE': '"%s"' % package.name, - 'DATADIR' : '"%s"' % sharedir, - 'PACKAGE_DATADIR' : '"%s"' % sharedir_package, - 'LOCALEDIR' : '"%s"' %localedir, - } - conf.env.Append (DEFINES = defines) - - command = r"""python -c 'import sys; sys.stdout.write ("%s/include/python%s" % (sys.prefix, sys.version[:3]))'""" #" - PYTHON_INCLUDE = os.popen (command).read ()#[:-1] - if env['fast']: - env.Append (CCFLAGS = ['-I%s' % PYTHON_INCLUDE]) - else: - env.Append (CPPPATH = [PYTHON_INCLUDE]) - - headers = ('assert.h', 'grp.h', 'libio.h', 'pwd.h', - 'sys/stat.h', 'utf8/wchar.h', 'wchar.h', 'Python.h') - for i in headers: - if conf.CheckCHeader (i): - key = re.sub ('[./]', '_', 'HAVE_' + string.upper (i)) - conf.env['DEFINES'][key] = 1 - - ccheaders = ('sstream',) - for i in ccheaders: - if conf.CheckCXXHeader (i): - key = re.sub ('[./]', '_', 'HAVE_' + string.upper (i)) - conf.env['DEFINES'][key] = 1 - - functions = ('chroot', 'fopencookie', 'funopen', - 'gettext', 'isinf', - 'mbrtowc', 'memmem', 'snprintf', 'vsnprintf', 'wcrtomb') - for i in functions: - if 0 or conf.CheckFunc (i): - key = re.sub ('[./]', '_', 'HAVE_' + string.upper (i)) - conf.env['DEFINES'][key] = 1 - - if conf.CheckYYCurrentBuffer (): - conf.env['DEFINES']['HAVE_FLEXLEXER_YY_CURRENT_BUFFER'] = 1 - - if conf.CheckLib ('dl'): - pass - - if env['fast']: - cpppath = [] - if env.has_key ('CPPPATH'): - cpppath = env['CPPPATH'] - - ## FIXME: linkage, check for libguile.h and scm_boot_guile - #this could happen after flower... 
- env.ParseConfig ('guile-config compile') - - test_program (required, 'pkg-config', '0.9.0', - 'pkg-config library compile manager', 'pkg-config') - if test_lib (required, 'freetype2', '0.0', - 'Development files for FreeType 2 font engine', - 'freetype6'): - conf.env['DEFINES']['HAVE_FREETYPE2'] = '1' - - if test_lib (required, 'pangoft2', '1.6.0', - 'Development files for pango, with FreeType2', - 'pango1.0'): - conf.env['DEFINES']['HAVE_PANGO_FT2'] = '1' - - if test_lib (optional, 'fontconfig', '2.2.0', - 'Development files for fontconfig', 'fontconfig1'): - conf.env['DEFINES']['HAVE_FONTCONFIG'] = '1' - - #this could happen only for compiling pango-* - if env['gui']: - test_lib (required, 'gtk+-2.0', '2.4.0', - 'Development files for GTK+', 'gtk2.0') - - if env['fast']: - # Using CCFLAGS = -I rather than CPPPATH = [ - # ] speeds up SCons - env['CCFLAGS'] += map (lambda x: '-I' + x, - env['CPPPATH'][len (cpppath):]) - env['CPPPATH'] = cpppath - - if required: - print - print '********************************' - print 'Please install required packages' - for i in required: - print '%s: %s-%s or newer (found: %s %s)' % i - Exit (1) - - if optional: - print - print '*************************************' - print 'Consider installing optional packages' - for i in optional: - print '%s: %s-%s or newer (found: %s %s)' % i - - return conf.Finish () - -def config_header (target, source, env): - config = open (str (target[0]), 'w') - for i in sorted (env['DEFINES'].keys ()): - config.write ('#define %s %s\n' % (i, env['DEFINES'][i])) - config.close () -env.Command (config_hh, config_cache, config_header) - -# hmm? 
-def xuniquify (lst): - n = [] - for i in lst: - if not i in n: - n.append (i) - lst = n - return lst - -def uniquify (lst): - d = {} - n = len (lst) - i = 0 - while i < n: - if not d.has_key (lst[i]): - d[lst[i]] = 1 - i += 1 - else: - del lst[i] - n -= 1 - return lst - -def uniquify_config_vars (env): - for i in config_vars: - if env.has_key (i) and type (env[i]) == type ([]): - env[i] = uniquify (env[i]) - -def save_config_cache (env): - ## FIXME: Is this smart, using option cache for saving - ## config.cache? I cannot seem to find the official method. - uniquify_config_vars (env) - opts.Save (config_cache, env) - - if 'config' in COMMAND_LINE_TARGETS: - sys.stdout.write ('\n') - sys.stdout.write ('LilyPond configured') - sys.stdout.write ('\n') - sys.stdout.write ('Now run') - sys.stdout.write ('\n') - sys.stdout.write (' scons [TARGET|DIR]...') - sys.stdout.write ('\n') - sys.stdout.write ('\n') - sys.stdout.write ('Examples:') - sys.stdout.write ('\n') - sys.stdout.write (' scons lily # build lilypond') - sys.stdout.write ('\n') - sys.stdout.write (' scons all # build everything') - sys.stdout.write ('\n') - sys.stdout.write (' scons doc # build documentation') - sys.stdout.write ('\n') - ## TODO - ## sys.stdout.write (' scons prefix=/usr DESTDIR=/tmp/pkg all install') - ## sys.stdout.write ('\n') - Exit (0) - elif not env['checksums']: - # When using timestams, config.hh is NEW. The next - # build triggers recompilation of everything. Exiting - # here makes SCons use the actual timestamp for config.hh - # and prevents recompiling everything the next run. - command = sys.argv[0] + ' ' + string.join (COMMAND_LINE_TARGETS) - sys.stdout.write ('Running %s ... ' % command) - sys.stdout.write ('\n') - s = os.system (command) - Exit (s) - -# WTF? -# scons: *** Calling Configure from Builders is not supported. 
-# env.Command (config_cache, None, configure) -if not os.path.exists (config_cache) \ - or (os.stat ('SConstruct')[stat.ST_MTIME] - > os.stat (config_cache)[stat.ST_MTIME]): - env = configure (None, None, env) - save_config_cache (env) -elif env['checksums']: - # just save everything - save_config_cache (env) - -#urg how does #/ subst work? -Export ('env') -SConscript ('buildscripts/builder.py') - -env.PrependENVPath ('PATH', - os.path.join (env['absbuild'], env['out'], 'usr/bin')) - -LILYPOND_DATADIR = os.path.join (run_prefix, 'share/lilypond/', version) - -if not os.path.exists (LILYPOND_DATADIR): - os.makedirs (LILYPOND_DATADIR) - -env.Command (LILYPOND_DATADIR, ['#/SConstruct', '#/VERSION'], symlink_tree) -env.Depends ('lily', LILYPOND_DATADIR) - -env.Append (ENV = { - 'LILYPOND_DATADIR' : LILYPOND_DATADIR, - 'TEXMF' : '{$LILYPOND_DATADIR,' - + os.popen ('kpsexpand \$TEXMF').read ()[:-1] + '}', - }) - -BUILD_ABC2LY = '${set__x}$PYTHON $srcdir/scripts/abc2ly.py' -BUILD_LILYPOND = '$absbuild/lily/$out/lilypond ${__verbose}' -BUILD_LILYPOND_BOOK = '$PYTHON $srcdir/scripts/lilypond-book.py ${__verbose}' - -if env['verbose'] and env['verbose'] != '0': - env['__verbose'] = ' --verbose' - env['set__x'] = 'set -x;' - -# post-option environment-update -env.Append ( - bindir = bindir, - sharedir = sharedir, - lilypond_datadir = sharedir_package, - localedir = localedir, - local_lilypond_datadir = sharedir_package_version, - lilypondprefix = lilypondprefix, - sharedir_package = sharedir_package, - sharedir_doc_package = sharedir_doc_package, - sharedir_package_version = sharedir_package_version, - libdir_package = libdir_package, - libdir_package_version = libdir_package_version, - - LILYPOND = BUILD_LILYPOND, - ABC2LY = BUILD_ABC2LY, - LILYPOND_BOOK = BUILD_LILYPOND_BOOK, - LILYPOND_BOOK_FORMAT = 'texi-html', - MAKEINFO_FLAGS = '--css-include=$srcdir/Documentation/texinfo.css', - ) - -env.Append (CCFLAGS = ['-pipe', '-Wno-pmf-conversions']) -if env['debugging']: - 
env.Append (CCFLAGS = ['-g']) -if env['optimising']: - env.Append (CCFLAGS = '-O2') -if env['warnings']: - env.Append (CCFLAGS = ['-W', '-Wall']) - env.Append (CXXFLAGS = ['-Wconversion']) - -# ugr,huh? -env.Append (LINKFLAGS = ['-Wl,--export-dynamic']) -# FIXME: ParseConfig ignores -L flag? -env.Append (LINKFLAGS = ['-L/usr/X11R6/lib']) - -## Explicit target and dependencies - -if 'clean' in COMMAND_LINE_TARGETS: - # ugh: prevent reconfigure instead of clean - os.system ('touch %s' % config_cache) - - command = sys.argv[0] + ' -c .' - sys.stdout.write ('Running %s ... ' % command) - sys.stdout.write ('\n') - s = os.system (command) - if os.path.exists (config_cache): - os.unlink (config_cache) - Exit (s) - -if 'sconsclean' in COMMAND_LINE_TARGETS: - command = 'rm -rf scons.cache $(find . -name ".scon*")' - s = os.system (command) - if os.path.exists (config_cache): - os.unlink (config_cache) - Exit (s) - -if 'realclean' in COMMAND_LINE_TARGETS: - command = 'rm -rf $(find . -name "out-scons" -o -name ".scon*")' - sys.stdout.write ('Running %s ... ' % command) - sys.stdout.write ('\n') - s = os.system (command) - if os.path.exists (config_cache): - os.unlink (config_cache) - Exit (s) - -# Declare SConscript phonies -env.Alias ('minimal', config_cache) - -if 0: - env.Alias ('mf-essential', config_cache) - env.Alias ('minimal', ['python', 'lily', 'mf-essential']) - env.Alias ('all', ['minimal', 'mf', '.']) - -else: - env.Alias ('minimal', ['python', 'lily', 'mf']) - env.Alias ('all', ['minimal', '.']) - - -# Do we want the doc/web separation? 
-env.Alias ('doc', - ['minimal', - 'Documentation', - 'Documentation/user', - 'Documentation/topdocs', - 'Documentation/bibliography', - 'input']) - -# Without target arguments, do minimal build -if not COMMAND_LINE_TARGETS: - env.Default (['minimal']) - -# GNU Make rerouting compat: -env.Alias ('web', 'doc') - - -env.Command (version_hh, '#/VERSION', - '$PYTHON ./stepmake/bin/make-version.py VERSION > $TARGET') - -# post-config environment update -env.Append ( - run_prefix = run_prefix, - LILYPOND_DATADIR = LILYPOND_DATADIR, - - # FIXME: move to lily/SConscript? - LIBPATH = [os.path.join (absbuild, 'flower', env['out'])], - CPPPATH = [outdir, ], - LILYPOND_PATH = ['.', - '$srcdir/input', - '$srcdir/input/regression', - '$srcdir/input/test', - '$srcdir/input/tutorial', - '$srcdir/Documentation/user', - '$absbuild/mf/$out', -# os.path.join (absbuild, 'Documentation', -# env['out']), -# os.path.join (absbuild, 'Documentation/user', -# env['out']), - ], - MAKEINFO_PATH = ['.', '$srcdir/Documentation/user', - '$absbuild/Documentation/user/$out'], - ) - -#### dist, tar -def plus (a, b): - a + b - -def cvs_entry_is_dir (line): - return line[0] == 'D' and line[-2] == '/' - -def cvs_entry_is_file (line): - return line[0] == '/' and line[-2] == '/' - -def cvs_dirs (dir): - entries = os.path.join (dir, 'CVS/Entries') - if not os.path.exists (entries): - return [] - entries = open (entries).readlines () - dir_entries = filter (cvs_entry_is_dir, entries) - dirs = map (lambda x: os.path.join (dir, x[2:x[2:].index ('/')+3]), - dir_entries) - return dirs + map (cvs_dirs, dirs) - -def cvs_files (dir): - entries = os.path.join (dir, 'CVS/Entries') - if not os.path.exists (entries): - return [] - entries = open (entries).readlines () - file_entries = filter (cvs_entry_is_file, entries) - files = map (lambda x: x[1:x[1:].index ('/')+1], file_entries) - return map (lambda x: os.path.join (dir, x), files) - -def flatten (tree, lst): - if type (tree) == type ([]): - for i in tree: - if 
type (i) == type ([]): - flatten (i, lst) - else: - lst.append (i) - return lst - -if os.path.isdir ('%(srcdir)s/CVS' % vars ()): - subdirs = flatten (cvs_dirs ('.'), []) -else: - # ugh - command = 'cd %(srcdir)s \ - && find . -name SConscript | sed s@/SConscript@@' % vars () - subdirs = string.split (os.popen (command).read ()) - -if env['fast']\ - and 'all' not in COMMAND_LINE_TARGETS\ - and 'doc' not in COMMAND_LINE_TARGETS\ - and 'web' not in COMMAND_LINE_TARGETS\ - and 'install' not in COMMAND_LINE_TARGETS\ - and 'clean' not in COMMAND_LINE_TARGETS: - subdirs = [ 'python', - 'lily', - 'flower', - 'mf', - ] - -if os.path.isdir ('%(srcdir)s/CVS' % vars ()): - src_files = reduce (lambda x, y: x + y, map (cvs_files, subdirs)) -else: - src_files = ['foobar'] - -readme_files = ['AUTHORS', 'README', 'INSTALL', 'NEWS'] -txt_files = map (lambda x: x + '.txt', readme_files) - - -# -# speeds up build by +- 5% -# -if not env['fast']: - foo = map (lambda x: env.TXT (x + '.txt', - os.path.join ('Documentation/topdocs', x)), - readme_files) - tar_base = package.name + '-' + version - tar_name = tar_base + '.tar.gz' - ball_prefix = os.path.join (outdir, tar_base) - tar_ball = os.path.join (outdir, tar_name) - - dist_files = src_files + txt_files - ball_files = map (lambda x: os.path.join (ball_prefix, x), dist_files) - map (lambda x: env.Depends (tar_ball, x), ball_files) - map (lambda x: env.Command (os.path.join (ball_prefix, x), x, - 'ln $SOURCE $TARGET'), dist_files) - tar = env.Command (tar_ball, src_files, - ['rm -f $$(find $TARGET.dir -name .sconsign)', - 'tar czf $TARGET -C $TARGET.dir %s' % tar_base,]) - env.Alias ('tar', tar) - - dist_ball = os.path.join (package.release_dir, tar_name) - env.Command (dist_ball, tar_ball, - 'if [ -e $SOURCE -a -e $TARGET ]; then rm $TARGET; fi;' \ - + 'ln $SOURCE $TARGET') - env.Depends ('dist', dist_ball) - patch_name = os.path.join (outdir, tar_base + '.diff.gz') - patch = env.PATCH (patch_name, tar_ball) - env.Depends (patch_name, 
dist_ball) - env.Alias ('release', patch) - -#### web -if not env['fast']: - web_base = os.path.join (outdir, 'web') - web_ball = web_base + '.tar.gz' - env['footify'] = 'MAILADDRESS=bug-lilypond@gnu.org $PYTHON stepmake/bin/add-html-footer.py --name=lilypond --version=$TOPLEVEL_VERSION' - web_ext = ['.html', '.ly', '.midi', '.pdf', '.png', '.ps.gz', '.txt',] - web_path = '-path "*/$out/*"' + string.join (web_ext, ' -or -path "*/$out/*"') + '-or -type l' - env['web_path'] = web_path - web_list = os.path.join (outdir, 'weblist') - # compatible make heritits - # fixme: generate in $outdir is cwd/builddir - env.Command (web_list, - ## Adding 'doc' dependency is correct, but takes - ## > 5min extra if you have a peder :-) - #'doc', - - '#/VERSION', - ['$PYTHON buildscripts/mutopia-index.py -o examples.html ./', - 'cd $absbuild && $footify $$(find . -name "*.html" -print)', - 'cd $absbuild && rm -f $$(find . -name "*.html~" -print)', - 'cd $absbuild && find Documentation input $web_path \ - > $TARGET', - '''echo '' > $absbuild/index.html''', - '''echo 'Redirecting to the documentation index...' >> $absbuild/index.html''', - 'cd $absbuild && ls *.html >> $TARGET',]) - env.Command (web_ball, web_list, - ['cat $SOURCE | tar -C $absbuild -czf $TARGET -T -',]) - #env.Alias ('web', web_ball) - www_base = os.path.join (outdir, 'www') - www_ball = www_base + '.tar.gz' - env.Command (www_ball, web_ball, - ['rm -rf $out/tmp', - 'mkdir -p $absbuild/$out/tmp', - 'tar -C $absbuild/$out/tmp -xzf $SOURCE', - 'cd $absbuild/$out/tmp && for i in $$(find . 
-name "$out"); ' - + ' do mv $$i $$(dirname $$i)/out-www; done', - 'tar -C $absbuild/$out/tmp -czf $TARGET .']) - env.Alias ('web', www_ball) - -#### tags -env.Append ( - ETAGSFLAGS = """--regex='{c++}/^LY_DEFINE *(\([^,]+\)/\\1/' \ - --regex='{c++}/^LY_DEFINE *([^"]*"\([^"]+\)"/\\1/'""") -code_ext = ['.cc', '.hh', '.scm', '.tcc',] -env.Command ('TAGS', filter (lambda x: os.path.splitext (x)[1] in code_ext, - src_files), - 'etags $ETAGSFLAGS $SOURCES') - -# Note: SConscripts are only needed in directories where something needs -# to be done, building or installing -for d in subdirs: - if os.path.exists (os.path.join (d, 'SConscript')): - b = os.path.join (env['build'], d, env['out']) - # Support clean sourcetree build (--srcdir build) - # and ./out build. - if os.path.abspath (b) != os.path.abspath (d): - env.BuildDir (b, d, duplicate = 0) - SConscript (os.path.join (b, 'SConscript')) - -env.Command ('tree', ['#/VERSION', '#/SConstruct'], symlink_tree) diff --git a/THANKS b/THANKS index 8a077a16f8..6703d34481 100644 --- a/THANKS +++ b/THANKS @@ -1,4 +1,4 @@ -Release 2.11 +Release 2.12 ************ @@ -102,6 +102,7 @@ Luc Wehli Maarten Hijzelendoorn Marc Lanoiselée Mark Polesky +Matthieu Jacquot Matthijs Frankeno Martijn Vromans Marnen Laibow-Koser diff --git a/buildscripts/GNUmakefile b/buildscripts/GNUmakefile deleted file mode 100644 index 3a40aa0fad..0000000000 --- a/buildscripts/GNUmakefile +++ /dev/null @@ -1,15 +0,0 @@ -depth = .. - -STEPMAKE_TEMPLATES=script install po -EXTRA_DIST_FILES=pfx2ttf.fontforge - -include $(depth)/make/stepmake.make - -# Should we install these? This should be handled by sysadmin or -# packager but if she forgets... 
-#INSTALLATION_OUT_SUFFIXES=1 -#INSTALLATION_OUT_DIR1=$(local_lilypond_datadir)/scripts -#INSTALLATION_OUT_FILES1=$(outdir)/lilypond-login $(outdir)/lilypond-profile - -all: $(INSTALLATION_FILES) - diff --git a/buildscripts/SConscript b/buildscripts/SConscript deleted file mode 100644 index 98d254f4b1..0000000000 --- a/buildscripts/SConscript +++ /dev/null @@ -1,5 +0,0 @@ -# -*-python-*- - -Import ('env') -sources = ['lilypond-profile.sh', 'lilypond-login.sh'] -gens = map (env.AT_COPY, sources) diff --git a/buildscripts/bib2html.py b/buildscripts/bib2html.py deleted file mode 100644 index c16f21cce2..0000000000 --- a/buildscripts/bib2html.py +++ /dev/null @@ -1,76 +0,0 @@ -#!@PYTHON@ -import os -import sys -import getopt -import tempfile - -# usage: -def usage (): - print 'usage: %s [-s style] [-o ] BIBFILES...' - -(options, files) = getopt.getopt (sys.argv[1:], 's:o:', []) - -output = 'bib.html' -style = 'long' - -for (o,a) in options: - if o == '-h' or o == '--help': - usage () - sys.exit (0) - elif o == '-s' or o == '--style': - style = a - elif o == '-o' or o == '--output': - output = a - else: - raise Exception ('unknown option: %s' % o) - - -if style not in ['alpha','index','long','longp','long-pario','short','short-pario','split']: - sys.stderr.write ("Unknown style \`%s'\n" % style) - -tempfile = tempfile.mktemp ('bib2html') - -if not files: - usage () - sys.exit (2) - - -def strip_extension (f, ext): - (p, e) = os.path.splitext (f) - if e == ext: - e = '' - return p + e - -nf = [] -for f in files: - nf.append (strip_extension (f, '.bib')) - -files = ','.join (nf) - -open (tempfile + '.aux', 'w').write (r''' -\relax -\citation{*} -\bibstyle{html-%(style)s} -\bibdata{%(files)s}''' % vars ()) - -cmd = "bibtex %s" % tempfile - -sys.stdout.write ("Invoking `%s'\n" % cmd) -stat = os.system (cmd) -if stat <> 0: - sys.exit(1) - - -#TODO: do tex -> html on output - -bbl = open (tempfile + '.bbl').read () - -open (output, 'w').write (bbl) - - -def cleanup 
(tempfile): - for a in ['aux','bbl', 'blg']: - os.unlink (tempfile + '.' + a) - -cleanup (tempfile) - diff --git a/buildscripts/build-coverage.sh b/buildscripts/build-coverage.sh deleted file mode 100644 index b86ebaaf52..0000000000 --- a/buildscripts/build-coverage.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/sh - -if test "$1" == "--fresh"; then - fresh=yes -fi - -if test ! -f config-cov.make; then - fresh=yes -fi - -if test "$fresh" = "yes"; -then - ./configure --enable-config=cov --disable-optimising \ - && make conf=cov -j2 clean \ - && perl -i~ -pe 's/-pipe /-fprofile-arcs -ftest-coverage -pipe /g' config-cov.make \ - && perl -i~ -pe 's/ -ldl / -lgcov -ldl /g' config-cov.make -else - find -name '*.gcda' -exec rm '{}' ';' -fi - -mkdir -p scripts/out-cov/ -touch scripts/out-cov/midi2ly scripts/out-cov/midi2ly.1 -make conf=cov -j2 && \ - make conf=cov test-clean OUT_TEST=testcov LILYPOND_JOBS= && \ - make conf=cov test OUT_TEST=testcov LILYPOND_JOBS='-dtrace-scheme-coverage ' - -if test "$?" != "0"; then - tail -100 out-cov/test-run.log - exit 1 -fi - -depth=../.. -resultdir=out/coverage-results - -rm -rf $resultdir -mkdir $resultdir -cd $resultdir - -ln $depth/lily/* . -ln $depth/scm/*.scm . -mv $depth/input/regression/out-testcov/*.scm.cov . -ln $depth/ly/*.ly . -ln $depth/lily/out-cov/*[ch] . 
-mkdir include -ln $depth/lily/include/* include/ -ln $depth/flower/include/* include/ -for a in *[cl] *.yy -do - gcov -o $depth/lily/out-cov/ -p $a > $a.gcov-summary -done - -python $depth/buildscripts/coverage.py --uncovered *.cc > uncovered.txt -python $depth/buildscripts/coverage.py --hotspots *.cc > hotspots.txt -python $depth/buildscripts/coverage.py --summary *.cc > summary.txt -python $depth/buildscripts/coverage.py --uncovered *.scm > uncovered-scheme.txt - -head -20 summary.txt - -cat < long-score.ly << EOF -\version "2.10.0" -foo = \new Staff \new Voice \repeat unfold 50 \relative { c4 d8[ d16( e]~ e16[ e e) f] g8 } -\score { - \new ChoirStaff << - \foo \foo \foo \foo - \foo \foo \foo \foo - - >> - \midi {} - \layout {} -} -EOF - -rm gmon.sum - -exe=$depth/out-prof/bin/lilypond - -## todo: figure out representative sample. -files="wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 mozart-hrn-3 mozart-hrn-3 long-score" - - - -$exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \ - -I $depth/input/mutopia/W.A.Mozart/ \ - $files - - -for a in *.profile; do - echo $a - cat $a -done - -echo 'running gprof' -gprof $exe > profile - -exit 0 - - -## gprof -s takes forever. 
-for a in seq 1 3; do - for f in $files ; do - $exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \ - -I $depth/input/mutopia/W.A.Mozart/ \ - $f - - echo 'running gprof' - if test -f gmon.sum ; then - gprof -s $exe gmon.out gmon.sum - else - mv gmon.out gmon.sum - fi - done -done - -gprof $exe gmon.sum > profile diff --git a/buildscripts/builder.py b/buildscripts/builder.py deleted file mode 100644 index 8313c37fec..0000000000 --- a/buildscripts/builder.py +++ /dev/null @@ -1,345 +0,0 @@ -# -*-python-*- - -import glob -import os -import string - -Import ('env') - -# utility - -def add_suffixes (target, source, env, target_suffixes, src_suffixes): - base = os.path.splitext (str (target[0]))[0] - return (target + map (lambda x: base + x, target_suffixes), - source + map (lambda x: base + x, src_suffixes)) - -# junkme; see _concat -def join_path (path, infix=os.pathsep, prefix = ''): - def dir (x): - if x and x[0] == '#': - return env['srcdir'] + x[1:] - return x - return string.join (map (lambda x: prefix + dir (x), path), infix) - - -def src_glob (s): - here = os.getcwd () - os.chdir (env.Dir ('.').srcnode ().abspath) - result = glob.glob (s) - os.chdir (here) - return result - -Export ('src_glob') - -def base_glob (s): - return map (lambda x: os.path.splitext (x)[0], src_glob (s)) - -Export ('base_glob') - -def install (target, dir): - dest = env['DESTDIR'] + dir - if type (target) == type ([]): - map (lambda x: env.Install (dir, x), target) - else: - env.Install (dir, target) - env.Alias ('install', dir) - -Export ('install') - -def _fixme (s): - x = string.replace (s, '#', env['srcdir']) - x = string.replace (x, '@', env['absbuild']) - return x - -# Clean separation between generic action + flags and actual -# configuration and flags in environment for this build. - -# Generic builders could/should be part of SCons. 
- - -HH = Builder (action = 'bison -d -o ${TARGET.base}.cc $SOURCE', - suffix = '.hh', src_suffix = '.yy') -env.Append (BUILDERS = {'HH' : HH}) - - -# Setup LilyPond environment. For the LilyPond build, we override -# some of these commands in the ENVironment. - -lilypond_book_flags = '''--format=$LILYPOND_BOOK_FORMAT --process="lilypond -I$srcdir -I$srcdir/input/test $__verbose --backend=eps --formats=ps,png --header=texidoc -dinternal-type-checking -ddump-signatures -danti-alias-factor=2" ''' - -env.Append ( - BSTINPUTS = '${SOURCE.dir}:${TARGET.dir}:', - BIB2HTML = '$PYTHON $srcdir/buildscripts/bib2html.py', - LILYOND_BOOK = 'lilypond-book', - LILYPOND_BOOK_FORMAT = '', - LILYPOND_BOOK_FLAGS = lilypond_book_flags, - LILYPOND_PATH = [], - # The SCons way around FOO_PATH: - LILYPOND_INCFLAGS = '$( ${_concat(INCPREFIX, LILYPOND_PATH, INCSUFFIX, __env__)} $)', - - MAKEINFO_PATH = [], - MAKEINFO_FLAGS = [], - MAKEINFO_INCFLAGS = '$( ${_concat(INCPREFIX, MAKEINFO_PATH, INCSUFFIX, __env__, RDirs)} $)', - #TEXI2DVI_FLAGS = [], - _TEXI2DVI_FLAGS = '$( ${_concat(" ", TEXI2DVI_FLAGS,)} $)', - ) - -TXT =\ - Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCFLAGS\ - --no-split --no-headers $SOURCE', - suffix = '.txt', src_suffix = '.texi') -env.Append (BUILDERS = {'TXT': TXT}) - -INFO =\ - Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCFLAGS $SOURCE', - suffix = '.info', src_suffix = '.texi') -env.Append (BUILDERS = {'INFO': INFO}) - -HTML =\ - Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCLUDES\ - --html --no-split --no-headers $MAKEINFO_FLAGS $SOURCE', -suffix = '.html', src_suffix = '.texi') -env.Append (BUILDERS = {'HTML': HTML}) - -TEXI =\ - Builder (action = - '$LILYPOND_BOOK --output=${TARGET.dir} \ - --include=${TARGET.dir} $LILYPOND_INCFLAGS \ - --process="$LILYPOND $LILYPOND_INCFLAGS" \ - $LILYPOND_BOOK_FLAGS \ - $SOURCE', - suffix = '.texi', src_suffix = '.tely') -env.Append (BUILDERS = {'TEXI': TEXI}) - -TEXIDVI =\ - Builder 
(action = 'cd ${TARGET.dir} && \ - texi2dvi --batch -I $srcdir/Documentation/user $_TEXI2DVI_FLAGS ${SOURCE.file}', - suffix = '.dvi', src_suffix = '.texi') -env.Append (BUILDERS = {'TEXIDVI': TEXIDVI}) - -DVIPS =\ - Builder (action = 'TEXINPUTS=${TARGET.dir}:$$TEXINPUTS $DVIPS -o $TARGET $DVIPS_FLAGS $SOURCE', - suffix = '.ps', src_suffix = '.dvi') -env.Append (BUILDERS = {'DVIPS': DVIPS}) - -DVIPDF =\ - Builder (action = 'TEXINPUTS=${TARGET.dir}:$$TEXINPUTS $DVIPS -o $TARGET -Ppdf $DVIPS_FLAGS $SOURCE', - suffix = '.pdfps', src_suffix = '.dvi') -env.Append (BUILDERS = {'DVIPDF': DVIPDF}) - -PSPDF =\ - Builder (action = 'ps2pdf $PSPDF_FLAGS $SOURCE $TARGET', - suffix = '.pdf', src_suffix = '.pdfps') -env.Append (BUILDERS = {'PSPDF': PSPDF}) - -PNG2EPS =\ - Builder (action = 'convert $SOURCE $TARGET', - suffix = '.eps', src_suffix = '.png') -env.Append (BUILDERS = {'PNG2EPS': PNG2EPS}) - -EPS2PNG =\ - Builder (action = 'convert $SOURCE $TARGET', - suffix = '.png', src_suffix = '.eps') -env.Append (BUILDERS = {'EPS2PNG': EPS2PNG}) - -def add_ps_target (target, source, env): - base = os.path.splitext (str (target[0]))[0] - return (target + [base + '.ps'], source) - -lilypond =\ - Builder (action = '$LILYPOND --output=${TARGET.base} --include=${TARGET.dir} $SOURCE', - suffix = '.pdf', src_suffix = '.ly') -## emitter = add_ps_target) -env.Append (BUILDERS = {'LilyPond': lilypond}) - -ABC = Builder (action = '$ABC2LY --output=${TARGET} --strict $SOURCE', - suffix = '.ly', src_suffix = '.abc') -env.Append (BUILDERS = {'ABC': ABC}) - -def add_log_target (target, source, env): - base = os.path.splitext (str (target[0]))[0] - return (target + [base + '.log'], source) - -def add_tfm_target (target, source, env): - base = os.path.splitext (str (target[0]))[0] - return (target + [base + '.tfm'], source) - -def add_lisp_enc_target (target, source, env): - base = os.path.splitext (str (target[0]))[0] - return (target + [base + '.lisp', base + '.enc'], - source) - -def 
add_cff_cffps_svg (target, source, env): - base = os.path.splitext (str (target[0]))[0] - return (target + [base + '.cff', base + '.cff.ps', base + '.svg'], - source) - -a = 'cd ${TARGET.dir} \ -&& MFINPUTS=.:${SOURCE.dir}:$srcdir/${SOURCE.dir}: \ -$MF "\\mode:=$MFMODE; nonstopmode; input ${SOURCE.filebase};" \ -| grep -v "@\|>>\|w:\|h:";' -tfm = Builder (action = a, suffix = '.tfm', src_suffix = '.mf', -# emitter = lambda t, s, e: add_suffixes (t, s, e, ['.log'], [])) - emitter = add_log_target) -env.Append (BUILDERS = {'TFM': tfm}) - -a = '$PYTHON $MF_TO_TABLE_PY \ ---outdir=${TARGET.dir} \ ---global-lisp=${TARGET.base}.otf-gtable \ ---lisp=${TARGET.base}.lisp \ ---enc=${TARGET.base}.enc \ -${TARGET.base}.log' -gtable = Builder (action = a, suffix = '.otf-gtable', src_suffix = '.log', - emitter = add_lisp_enc_target) -env.Append (BUILDERS = {'GTABLE': gtable}) - -def add_enc_src (target, source, env): - base = os.path.splitext (str (target[0]))[0] - #return (target, source + [base + '.enc']) - return (target + [base + '.pfb', base + '.svg'], source + [base + '.enc']) - -def add_svg (target, source, env): - base = os.path.splitext (str (target[0]))[0] - return (target + [base + '.svg'], source) - -# FIXME UGH, should fix --output option for mftrace -a = 'cd ${TARGET.dir} && \ -if test -e ${SOURCE.filebase}.enc; then encoding="--encoding=${SOURCE.filebase}.enc"; fi; \ -MFINPUTS=$srcdir/mf:.: \ -$MFTRACE --formats=pfa,pfb,svg --simplify --keep-trying --no-afm \ -$$encoding $__verbose \ ---include=${TARGET.dir} \ -${SOURCE.file}' - -pfa = Builder (action = a, - suffix = '.pfa', - src_suffix = '.mf', - emitter = add_enc_src) -env.Append (BUILDERS = {'PFA': pfa}) - -a = ['(cd ${TARGET.dir} && $FONTFORGE -script ${SOURCE.file})', -# '$PYTHON $srcdir/buildscripts/ps-embed-cff.py ${SOURCE.base}.cff $$(cat ${SOURCE.base}.fontname) ${SOURCE.base}.cff.ps', - 'rm -f ${TARGET.dir}/*.scale.pfa'] -otf = Builder (action = a, - suffix = '.otf', - src_suffix = '.pe', -# emitter = 
add_cff_cffps_svg - emitter = add_svg - ) -env.Append (BUILDERS = {'OTF': otf}) - - -# Specific builders - -env['DIFF_PY'] = '$srcdir/stepmake/bin/package-diff.py' -a = '$PYTHON $DIFF_PY $NO__verbose --outdir=${TARGET.dir}' -patch = Builder (action = a, suffix = '.diff', src_suffix = '.tar.gz') -env.Append (BUILDERS = {'PATCH': patch}) - -atvars = [ -'BASH', -'DATE', -'sharedstatedir', -'GUILE', -'bindir', -'date', -'datadir', -'lilypond_datadir', -'lilypond_libdir', -'local_lilypond_datadir', -'local_lilypond_libdir', -'localedir', -'PACKAGE', -'package', -'PATHSEP', -'PERL', -'prefix', -'program_prefix', -'program_suffix', -'PYTHON', -'SHELL', -'TOPLEVEL_VERSION', -'step-bindir', -] - -def at_copy (target, source, env): - n = str (source[0]) - s = open (n).read () - for i in atvars: - if env.has_key (i): - s = string.replace (s, '@%s@'% i, env[i]) - t = str (target[0]) - open (t, 'w').write (s) - # wugh - if os.path.basename (os.path.dirname (str (target[0]))) == 'bin': - os.chmod (t, 0755) - -AT_COPY = Builder (action = at_copy, src_suffix = ['.in', '.py', '.sh',]) -env.Append (BUILDERS = {'AT_COPY': AT_COPY}) - -MO = Builder (action = 'msgfmt -o $TARGET $SOURCE', - suffix = '.mo', src_suffix = '.po') -env.Append (BUILDERS = {'MO': MO}) - -ugh = 'ln -f po/lilypond.pot ${TARGET.dir}/lilypond.po ; ' -a = ugh + 'xgettext --default-domain=lilypond --join \ ---output-dir=${TARGET.dir} --add-comments \ ---keyword=_ --keyword=_f --keyword=_i $SOURCES' -PO = Builder (action = a, suffix = '.pot', - src_suffix = ['.cc', '.hh', '.py'], multi = 1) -env['potarget'] = os.path.join (env['absbuild'], 'po', env['out'], - 'lilypond.pot') -env['pocommand'] = a - -ugh = '; mv ${TARGET} ${SOURCE}' -a = 'msgmerge ${SOURCE} ${SOURCE.dir}/lilypond.pot -o ${TARGET}' + ugh -POMERGE = Builder (action = a, suffix = '.pom', src_suffix = '.po') -env.Append (BUILDERS = {'POMERGE': POMERGE}) - -a = 'BSTINPUTS=$BSTINPUTS $BIB2HTML -o $TARGET $SOURCE' -BIB2HTML = Builder (action = a, suffix = 
'.html', src_suffix = '.bib') -env.Append (BUILDERS = {'BIB2HTML': BIB2HTML}) - -a = '$PYTHON $srcdir/buildscripts/lys-to-tely.py \ ---name=${TARGET.base} --title="$TITLE" $SOURCES' -LYS2TELY = Builder (action = a, suffix = '.tely', src_suffix = '.ly') -env.Append (BUILDERS = {'LYS2TELY': LYS2TELY}) - - -def mutopia (ly=None, abc=None): - e = env.Copy ( - LILYPOND_BOOK_FLAGS = lilypond_book_flags, - ) - - if not abc: - abc = base_glob ('*.abc') - if not ly: - ly = base_glob ('*.ly') + map (e.ABC, abc) - pdf = map (e.LilyPond, ly) - env.Depends (pdf, ['#/lily', '#/mf']) - env.Alias ('doc', pdf) - -Export ('mutopia') - -def collate (title = 'collated files'): - ly = base_glob ('*.ly') - - e = env.Copy ( - TITLE = title, - LILYPOND_BOOK_FLAGS = lilypond_book_flags, - # __verbose = ' --verbose', - ) - tely = e.LYS2TELY ('collated-files', ly) - texi = e.TEXI (tely) - env.Depends (texi, ['#/lily', '#/mf']) - dvi = e.TEXIDVI (texi) - pspdf = e.DVIPDF (dvi) - pdf = e.PSPDF (pspdf) - html = e.HTML (texi) - - env.Alias ('doc', pdf) - env.Alias ('doc', html) - -Export ('collate') - -Export ('env') diff --git a/buildscripts/buildlib.py b/buildscripts/buildlib.py deleted file mode 100644 index cd99586ff8..0000000000 --- a/buildscripts/buildlib.py +++ /dev/null @@ -1,42 +0,0 @@ -#!@PYTHON@ - -import subprocess -import re -import sys - -verbose = False - -def read_pipe (command): - child = subprocess.Popen (command, - stdout = subprocess.PIPE, - stderr = subprocess.PIPE, - shell = True) - (output, error) = child.communicate () - code = str (child.wait ()) - if not child.stdout or child.stdout.close (): - print "pipe failed: %(command)s" % locals () - if code != '0': - error = code + ' ' + error - return (output, error) - -revision_re = re.compile ('GIT [Cc]ommittish:\s+([a-f0-9]+)') -vc_diff_cmd = 'git diff %(color_flag)s %(revision)s HEAD -- %(original)s | cat' - -def check_translated_doc (original, translated_file, translated_contents, color=False): - m = revision_re.search 
(translated_contents) - if not m: - sys.stderr.write ('error: ' + translated_file + \ - ": no 'GIT committish: ' found.\nPlease check " + \ - 'the whole file against the original in English, then ' + \ - 'fill in HEAD committish in the header.\n') - sys.exit (1) - revision = m.group (1) - - if color: - color_flag = '--color' - else: - color_flag = '--no-color' - c = vc_diff_cmd % vars () - if verbose: - sys.stderr.write ('running: ' + c) - return read_pipe (c) diff --git a/buildscripts/catmidi.py b/buildscripts/catmidi.py deleted file mode 100644 index c90d602627..0000000000 --- a/buildscripts/catmidi.py +++ /dev/null @@ -1,12 +0,0 @@ -#!@PYTHON@ - -import sys -import midi - -(h,tracks) = midi.parse (open (sys.argv[1]).read ()) - -tracks = tracks[1:] - -for t in tracks: - for e in t: - print e diff --git a/buildscripts/check_texi_refs.py b/buildscripts/check_texi_refs.py deleted file mode 100644 index dff7e334f1..0000000000 --- a/buildscripts/check_texi_refs.py +++ /dev/null @@ -1,521 +0,0 @@ -#!/usr/bin/env python - -""" -check_texi_refs.py -Interactive Texinfo cross-references checking and fixing tool - -""" - - -import sys -import re -import os -import optparse -import imp - -outdir = 'out-www' - -log = sys.stderr -stdout = sys.stdout - -file_not_found = 'file not found in include path' - -warn_not_fixed = '*** Warning: this broken x-ref has not been fixed!\n' - -opt_parser = optparse.OptionParser (usage='check_texi_refs.py [OPTION]... FILE', - description='''Check and fix \ -cross-references in a collection of Texinfo -documents heavily cross-referenced each other. 
-''') - -opt_parser.add_option ('-a', '--auto-fix', - help="Automatically fix cross-references whenever \ -it is possible", - action='store_true', - dest='auto_fix', - default=False) - -opt_parser.add_option ('-b', '--batch', - help="Do not run interactively", - action='store_false', - dest='interactive', - default=True) - -opt_parser.add_option ('-c', '--check-comments', - help="Also check commented out x-refs", - action='store_true', - dest='check_comments', - default=False) - -opt_parser.add_option ('-p', '--check-punctuation', - help="Check punctuation after x-refs", - action='store_true', - dest='check_punctuation', - default=False) - -opt_parser.add_option ("-I", '--include', help="add DIR to include path", - metavar="DIR", - action='append', dest='include_path', - default=[os.path.abspath (os.getcwd ())]) - -(options, files) = opt_parser.parse_args () - -class InteractionError (Exception): - pass - - -manuals_defs = imp.load_source ('manuals_defs', files[0]) -manuals = {} - -def find_file (name, prior_directory='.'): - p = os.path.join (prior_directory, name) - out_p = os.path.join (prior_directory, outdir, name) - if os.path.isfile (p): - return p - elif os.path.isfile (out_p): - return out_p - - # looking for file in include_path - for d in options.include_path: - p = os.path.join (d, name) - if os.path.isfile (p): - return p - - # file not found in include_path: looking in `outdir' subdirs - for d in options.include_path: - p = os.path.join (d, outdir, name) - if os.path.isfile (p): - return p - - raise EnvironmentError (1, file_not_found, name) - - -exit_code = 0 - -def set_exit_code (n): - global exit_code - exit_code = max (exit_code, n) - - -if options.interactive: - try: - import readline - except: - pass - - def yes_prompt (question, default=False, retries=3): - d = {True: 'y', False: 'n'}.get (default, False) - while retries: - a = raw_input ('%s [default: %s]' % (question, d) + '\n') - if a.lower ().startswith ('y'): - return True - if a.lower 
().startswith ('n'): - return False - if a == '' or retries < 0: - return default - stdout.write ("Please answer yes or no.\n") - retries -= 1 - - def search_prompt (): - """Prompt user for a substring to look for in node names. - -If user input is empty or matches no node name, return None, -otherwise return a list of (manual, node name, file) tuples. - -""" - substring = raw_input ("Enter a substring to search in node names \ -(press Enter to skip this x-ref):\n") - if not substring: - return None - substring = substring.lower () - matches = [] - for k in manuals: - matches += [(k, node, manuals[k]['nodes'][node][0]) - for node in manuals[k]['nodes'] - if substring in node.lower ()] - return matches - -else: - def yes_prompt (question, default=False, retries=3): - return default - - def search_prompt (): - return None - - -ref_re = re.compile \ - ('@(ref|ruser|rlearning|rprogram|rglos)(?:\\{(?P[^,\\\\\\}]+?)|\ -named\\{(?P[^,\\\\]+?),(?P[^,\\\\\\}]+?))\\}(?P.)', - re.DOTALL) -node_include_re = re.compile (r'(?m)^@(node|include)\s+(.+?)$') - -whitespace_re = re.compile (r'\s+') -line_start_re = re.compile ('(?m)^') - -def which_line (index, newline_indices): - """Calculate line number of a given string index - -Return line number of string index index, where -newline_indices is an ordered iterable of all newline indices. -""" - inf = 0 - sup = len (newline_indices) - 1 - n = len (newline_indices) - while inf + 1 != sup: - m = (inf + sup) / 2 - if index >= newline_indices [m]: - inf = m - else: - sup = m - return inf + 1 - - -comments_re = re.compile ('(? 
comments_boundaries[k][0] - and end <= comments_boundaries[k][1]): - return True - elif end <= comments_boundaries[k][0]: - return False - return False - - -def read_file (f, d): - s = open (f).read () - base = os.path.basename (f) - dir = os.path.dirname (f) - - d['contents'][f] = s - - d['newline_indices'][f] = [m.end () for m in line_start_re.finditer (s)] - if options.check_comments: - d['comments_boundaries'][f] = [] - else: - d['comments_boundaries'][f] = calc_comments_boundaries (s) - - for m in node_include_re.finditer (s): - if m.group (1) == 'node': - line = which_line (m.start (), d['newline_indices'][f]) - d['nodes'][m.group (2)] = (f, line) - - elif m.group (1) == 'include': - try: - p = find_file (m.group (2), dir) - except EnvironmentError, (errno, strerror): - if strerror == file_not_found: - continue - else: - raise - read_file (p, d) - - -def read_manual (name): - """Look for all node names and cross-references in a Texinfo document - -Return a (manual, dictionary) tuple where manual is the cross-reference -macro name defined by references_dict[name], and dictionary -has the following keys: - - 'nodes' is a dictionary of `node name':(file name, line number), - - 'contents' is a dictionary of file:`full file contents', - - 'newline_indices' is a dictionary of -file:[list of beginning-of-line string indices], - - 'comments_boundaries' is a list of (start, end) tuples, -which contain string indices of start and end of each comment. - -Included files that can be found in the include path are processed too. 
- -""" - d = {} - d['nodes'] = {} - d['contents'] = {} - d['newline_indices'] = {} - d['comments_boundaries'] = {} - manual = manuals_defs.references_dict.get (name, '') - try: - f = find_file (name + '.tely') - except EnvironmentError, (errno, strerror): - if not strerror == file_not_found: - raise - else: - try: - f = find_file (name + '.texi') - except EnvironmentError, (errno, strerror): - if strerror == file_not_found: - sys.stderr.write (name + '.{texi,tely}: ' + - file_not_found + '\n') - return (manual, d) - else: - raise - - log.write ("Processing manual %s (%s)\n" % (f, manual)) - read_file (f, d) - return (manual, d) - - -log.write ("Reading files...\n") - -manuals = dict ([read_manual (name) - for name in manuals_defs.references_dict.keys ()]) - -ref_fixes = set () -bad_refs_count = 0 -fixes_count = 0 - -def add_fix (old_type, old_ref, new_type, new_ref): - ref_fixes.add ((old_type, old_ref, new_type, new_ref)) - - -def lookup_fix (r): - found = [] - for (old_type, old_ref, new_type, new_ref) in ref_fixes: - if r == old_ref: - found.append ((new_type, new_ref)) - return found - - -def preserve_linebreak (text, linebroken): - if linebroken: - if ' ' in text: - text = text.replace (' ', '\n', 1) - n = '' - else: - n = '\n' - else: - n = '' - return (text, n) - - -def choose_in_numbered_list (message, string_list, sep=' ', retries=3): - S = set (string_list) - S.discard ('') - string_list = list (S) - numbered_list = sep.join ([str (j + 1) + '. 
' + string_list[j] - for j in range (len (string_list))]) + '\n' - t = retries - while t > 0: - value = '' - stdout.write (message + - "(press Enter to discard and start a new search)\n") - input = raw_input (numbered_list) - if not input: - return '' - try: - value = string_list[int (input) - 1] - except IndexError: - stdout.write ("Error: index number out of range\n") - except ValueError: - matches = [input in v for v in string_list] - n = matches.count (True) - if n == 0: - stdout.write ("Error: input matches no item in the list\n") - elif n > 1: - stdout.write ("Error: ambiguous input (matches several items \ -in the list)\n") - else: - value = string_list[matches.index (True)] - if value: - return value - t -= 1 - raise InteractionError ("%d retries limit exceeded" % retries) - -refs_count = 0 - -def check_ref (manual, file, m): - global fixes_count, bad_refs_count, refs_count - refs_count += 1 - bad_ref = False - fixed = True - type = m.group (1) - original_name = m.group ('ref') or m.group ('refname') - name = whitespace_re.sub (' ', original_name). strip () - newline_indices = manuals[manual]['newline_indices'][file] - line = which_line (m.start (), newline_indices) - linebroken = '\n' in original_name - original_display_name = m.group ('display') - next_char = m.group ('last') - if original_display_name: # the xref has an explicit display name - display_linebroken = '\n' in original_display_name - display_name = whitespace_re.sub (' ', original_display_name). 
strip () - commented_out = is_commented_out \ - (m.start (), m.end (), manuals[manual]['comments_boundaries'][file]) - useful_fix = not outdir in file - - # check puncuation after x-ref - if options.check_punctuation and not next_char in '.,;:!?': - stdout.write ("Warning: %s: %d: `%s': x-ref \ -not followed by punctuation\n" % (file, line, name)) - - # validate xref - explicit_type = type - new_name = name - - if type != 'ref' and type == manual and not commented_out: - if useful_fix: - fixed = False - bad_ref = True - stdout.write ("\n%s: %d: `%s': external %s x-ref should be internal\n" - % (file, line, name, type)) - if options.auto_fix or yes_prompt ("Fix this?"): - type = 'ref' - - if type == 'ref': - explicit_type = manual - - if not name in manuals[explicit_type]['nodes'] and not commented_out: - bad_ref = True - fixed = False - stdout.write ('\n') - if type == 'ref': - stdout.write ("%s: %d: `%s': wrong internal x-ref\n" - % (file, line, name)) - else: - stdout.write ("%s: %d: `%s': wrong external `%s' x-ref\n" - % (file, line, name, type)) - # print context - stdout.write ('--\n' + manuals[manual]['contents'][file] - [newline_indices[max (0, line - 2)]: - newline_indices[min (line + 3, - len (newline_indices) - 1)]] + - '--\n') - - # try to find the reference in other manuals - found = [] - for k in [k for k in manuals if k != explicit_type]: - if name in manuals[k]['nodes']: - if k == manual: - found = ['ref'] - stdout.write (" found as internal x-ref\n") - break - else: - found.append (k) - stdout.write (" found as `%s' x-ref\n" % k) - - if (len (found) == 1 - and (options.auto_fix or yes_prompt ("Fix this x-ref?"))): - add_fix (type, name, found[0], name) - type = found[0] - fixed = True - - elif len (found) > 1 and useful_fix: - if options.interactive or options.auto_fix: - stdout.write ("* Several manuals contain this node name, \ -cannot determine manual automatically.\n") - if options.interactive: - t = choose_in_numbered_list ("Choose manual for 
this x-ref by \ -index number or beginning of name:\n", found) - if t: - add_fix (type, name, t, name) - type = t - fixed = True - - if not fixed: - # try to find a fix already made - found = lookup_fix (name) - - if len (found) == 1: - stdout.write ("Found one previous fix: %s `%s'\n" % found[0]) - if options.auto_fix or yes_prompt ("Apply this fix?"): - type, new_name = found[0] - fixed = True - - elif len (found) > 1: - if options.interactive or options.auto_fix: - stdout.write ("* Several previous fixes match \ -this node name, cannot fix automatically.\n") - if options.interactive: - concatened = choose_in_numbered_list ("Choose new manual \ -and x-ref by index number or beginning of name:\n", [''.join ([i[0], ' ', i[1]]) - for i in found], - sep='\n') - if concatened: - type, new_name = concatenated.split (' ', 1) - fixed = True - - if not fixed: - # all previous automatic fixing attempts failed, - # ask user for substring to look in node names - while True: - node_list = search_prompt () - if node_list == None: - if options.interactive: - stdout.write (warn_not_fixed) - break - elif not node_list: - stdout.write ("No matched node names.\n") - else: - concatenated = choose_in_numbered_list ("Choose \ -node name and manual for this x-ref by index number or beginning of name:\n", \ - [' '.join ([i[0], i[1], '(in %s)' % i[2]]) - for i in node_list], - sep='\n') - if concatenated: - t, z = concatenated.split (' ', 1) - new_name = z.split (' (in ', 1)[0] - add_fix (type, name, t, new_name) - type = t - fixed = True - break - - if fixed and type == manual: - type = 'ref' - bad_refs_count += int (bad_ref) - if bad_ref and not useful_fix: - stdout.write ("*** Warning: this file is automatically generated, \ -please fix the code source instead of generated documentation.\n") - - # compute returned string - if new_name == name: - if bad_ref and (options.interactive or options.auto_fix): - # only the type of the ref was fixed - fixes_count += int (fixed) - if 
original_display_name: - return ('@%snamed{%s,%s}' % (type, original_name, original_display_name)) + next_char - else: - return ('@%s{%s}' % (type, original_name)) + next_char - else: - fixes_count += int (fixed) - (ref, n) = preserve_linebreak (new_name, linebroken) - if original_display_name: - if bad_ref: - stdout.write ("Current display name is `%s'\n") - display_name = raw_input \ - ("Enter a new display name or press enter to keep the existing name:\n") \ - or display_name - (display_name, n) = preserve_linebreak (display_name, display_linebroken) - else: - display_name = original_display_name - return ('@%snamed{%s,%s}' % (type, ref, display_name)) + \ - next_char + n - else: - return ('@%s{%s}' % (type, ref)) + next_char + n - - -log.write ("Checking cross-references...\n") - -try: - for key in manuals: - for file in manuals[key]['contents']: - s = ref_re.sub (lambda m: check_ref (key, file, m), - manuals[key]['contents'][file]) - if s != manuals[key]['contents'][file]: - open (file, 'w').write (s) -except KeyboardInterrupt: - log.write ("Operation interrupted, exiting.\n") - sys.exit (2) -except InteractionError, instance: - log.write ("Operation refused by user: %s\nExiting.\n" % instance) - sys.exit (3) - -log.write ("Done: %d x-refs found, %d bad x-refs found, fixed %d.\n" % - (refs_count, bad_refs_count, fixes_count)) diff --git a/buildscripts/check_translation.py b/buildscripts/check_translation.py deleted file mode 100644 index 090b1fbe8a..0000000000 --- a/buildscripts/check_translation.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python - -import __main__ -import optparse -import os -import sys - -import langdefs -import buildlib - -verbose = 0 -use_colors = False -lang = 'C' -C = lang - -def dir_lang (file, lang, lang_dir_index): - path_components = file.split ('/') - path_components[lang_dir_index] = lang - return os.path.join (*path_components) - -def do_file (file_name, lang_codes, buildlib): - if verbose: - sys.stderr.write ('%s...\n' % 
file_name) - split_file_name = file_name.split ('/') - d1, d2 = split_file_name[0:2] - if d1 in lang_codes: - check_lang = d1 - lang_dir_index = 0 - elif d2 in lang_codes: - check_lang = d2 - lang_dir_index = 1 - else: - check_lang = lang - if check_lang == C: - raise Exception ('cannot determine language for ' + file_name) - - original = dir_lang (file_name, '', lang_dir_index) - translated_contents = open (file_name).read () - (diff_string, error) \ - = buildlib.check_translated_doc (original, - file_name, - translated_contents, - color=use_colors and not update_mode) - - if error: - sys.stderr.write ('warning: %s: %s' % (file_name, error)) - - if update_mode: - if error or len (diff_string) >= os.path.getsize (original): - buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + original) - elif diff_string: - diff_file = original + '.diff' - f = open (diff_file, 'w') - f.write (diff_string) - f.close () - buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + diff_file) - os.remove (diff_file) - else: - sys.stdout.write (diff_string) - -def usage (): - sys.stdout.write (r''' -Usage: -check-translation [--language=LANG] [--verbose] [--update] FILE... - -This script is licensed under the GNU GPL. 
-''') - -def do_options (): - global lang, verbose, update_mode, use_colors - - p = optparse.OptionParser (usage="check-translation [--language=LANG] [--verbose] FILE...", - description="This script is licensed under the GNU GPL.") - p.add_option ("--language", - action='store', - default='site', - dest="language") - p.add_option ("--no-color", - action='store_false', - default=True, - dest="color", - help="do not print ANSI-cooured output") - p.add_option ("--verbose", - action='store_true', - default=False, - dest="verbose", - help="print details, including executed shell commands") - p.add_option ('-u', "--update", - action='store_true', - default=False, - dest='update_mode', - help='call $EDITOR to update the translation') - - (options, files) = p.parse_args () - verbose = options.verbose - lang = options.language - use_colors = options.color - update_mode = options.update_mode - - return files - -def main (): - global update_mode, text_editor - - files = do_options () - if 'EDITOR' in os.environ: - text_editor = os.environ['EDITOR'] - else: - update_mode = False - - buildlib.verbose = verbose - - for i in files: - do_file (i, langdefs.LANGDICT.keys (), buildlib) - -if __name__ == '__main__': - main () diff --git a/buildscripts/coverage.py b/buildscripts/coverage.py deleted file mode 100644 index d44f81fdee..0000000000 --- a/buildscripts/coverage.py +++ /dev/null @@ -1,248 +0,0 @@ -#!/usr/bin/python - -import os -import glob -import re -import sys -import optparse - -#File 'accidental-engraver.cc' -#Lines executed:87.70% of 252 - -def summary (args): - results = [] - for f in args: - str = open (f).read () - m = re.search ("File '([^']+.cc)'\s*Lines executed:([0-9.]+)% of ([0-9]+)", str) - - if m and '/usr/lib' in m.group (1): - continue - - if m: - cov = float (m.group (2)) - lines = int (m.group (3)) - pain = lines * (100.0 - cov) - file = m.group (1) - tup = (pain, locals ().copy()) - - results.append(tup) - - results.sort () - results.reverse() - - print 
'files sorted by number of untested lines (decreasing)' - print - print '%5s (%6s): %s' % ('cov %', 'lines', 'file') - print '----------------------------------------------' - - for (pain, d) in results: - print '%(cov)5.2f (%(lines)6d): %(file)s' % d - -class Chunk: - def __init__ (self, range, coverage_count, all_lines, file): - assert coverage_count >= 0 - assert type (range) == type (()) - - self.coverage_count = coverage_count - self.range = range - self.all_lines = all_lines - self.file = file - - def length (self): - return self.range[1] - self.range[0] - - def text (self): - return ''.join ([l[2] for l in self.lines()]) - - def lines (self): - return self.all_lines[self.range[0]: - self.range[1]] - def widen (self): - self.range = (min (self.range[0] -1, 0), - self.range[0] +1) - def write (self): - print 'chunk in', self.file - for (c, n, l) in self.lines (): - cov = '%d' % c - if c == 0: - cov = '#######' - elif c < 0: - cov = '' - sys.stdout.write ('%8s:%8d:%s' % (cov, n, l)) - - def uncovered_score (self): - return self.length () - -class SchemeChunk (Chunk): - def uncovered_score (self): - text = self.text () - if (text.startswith ('(define ') - and not text.startswith ('(define (')): - return 0 - - if text.startswith ('(use-modules '): - return 0 - - if (text.startswith ('(define-public ') - and not text.startswith ('(define-public (')): - return 0 - - return len ([l for (c,n,l) in self.lines() if (c == 0)]) - -def read_gcov (f): - ls = [] - - in_lines = [l for l in open (f).readlines ()] - (count_len, line_num_len) = tuple (map (len, in_lines[0].split (':')[:2])) - - for l in in_lines: - c = l[:count_len].strip () - l = l[count_len+1:] - n = int (l[:line_num_len].strip ()) - - if n == 0: - continue - - if '#' in c: - c = 0 - elif c == '-': - c = -1 - else: - c = int (c) - - l = l[line_num_len+1:] - - ls.append ((c,n,l)) - - return ls - -def get_c_chunks (ls, file): - chunks = [] - chunk = [] - - last_c = -1 - for (c, n, l) in ls: - if not (c == 
last_c or c < 0 and l != '}\n'): - if chunk and last_c >= 0: - nums = [n-1 for (n, l) in chunk] - chunks.append (Chunk ((min (nums), max (nums)+1), - last_c, ls, file)) - chunk = [] - - chunk.append ((n,l)) - if c >= 0: - last_c = c - - return chunks - -def get_scm_chunks (ls, file): - chunks = [] - chunk = [] - - def new_chunk (): - if chunk: - nums = [n-1 for (n, l) in chunk] - chunks.append (SchemeChunk ((min (nums), max (nums)+1), - max (last_c, 0), ls, file)) - chunk[:] = [] - - last_c = -1 - for (cov_count, line_number, line) in ls: - if line.startswith ('('): - new_chunk () - last_c = -1 - - chunk.append ((line_number, line)) - if cov_count >= 0: - last_c = cov_count - - return chunks - -def widen_chunk (ch, ls): - a -= 1 - b += 1 - - return [(n, l) for (c, n, l) in ls[a:b]] - - -def extract_chunks (file): - try: - ls = read_gcov (file) - except IOError, s : - print s - return [] - - cs = [] - if 'scm' in file: - cs = get_scm_chunks (ls, file) - else: - cs = get_c_chunks (ls, file) - return cs - - -def filter_uncovered (chunks): - def interesting (c): - if c.coverage_count > 0: - return False - - t = c.text() - for stat in ('warning', 'error', 'print', 'scm_gc_mark'): - if stat in t: - return False - return True - - return [c for c in chunks if interesting (c)] - - -def main (): - p = optparse.OptionParser (usage="usage coverage.py [options] files", - description="") - p.add_option ("--summary", - action='store_true', - default=False, - dest="summary") - - p.add_option ("--hotspots", - default=False, - action='store_true', - dest="hotspots") - - p.add_option ("--uncovered", - default=False, - action='store_true', - dest="uncovered") - - - (options, args) = p.parse_args () - - - if options.summary: - summary (['%s.gcov-summary' % s for s in args]) - - if options.uncovered or options.hotspots: - chunks = [] - for a in args: - name = a - if name.endswith ('scm'): - name += '.cov' - else: - name += '.gcov' - - chunks += extract_chunks (name) - - if 
options.uncovered: - chunks = filter_uncovered (chunks) - chunks = [(c.uncovered_score (), c) for c in chunks if c.uncovered_score() > 0] - elif options.hotspots: - chunks = [((c.coverage_count, -c.length()), c) for c in chunks] - - - chunks.sort () - chunks.reverse () - for (score, c) in chunks: - c.write () - - - -if __name__ == '__main__': - main () diff --git a/buildscripts/extract_texi_filenames.py b/buildscripts/extract_texi_filenames.py deleted file mode 100644 index 5798d5dab2..0000000000 --- a/buildscripts/extract_texi_filenames.py +++ /dev/null @@ -1,170 +0,0 @@ -#!@PYTHON@ -# -*- coding: utf-8 -*- -# extract_texi_filenames.py - -# USAGE: extract_texi_filenames.py [-o OUTDIR] FILES -# -# -o OUTDIR specifies that output files should rather be written in OUTDIR -# -# Description: -# This script parses the .texi file given and creates a file with the -# nodename <=> filename/anchor map. -# The idea behind: Unnumbered subsections go into the same file as the -# previous numbered section, @translationof gives the original node name, -# which is then used for the filename/anchor. -# -# If this script is run on a file texifile.texi, it produces a file -# texifile[.LANG].xref-map with tab-separated entries of the form -# NODE\tFILENAME\tANCHOR -# LANG is the document language in case it's not 'en' -# Note: The filename does not have any extension appended! -# This file can then be used by our texi2html init script to determine -# the correct file name and anchor for external refs - -import sys -import re -import os -import getopt - -optlist, args = getopt.getopt (sys.argv[1:],'o:') -files = args - -outdir = '.' 
-for x in optlist: - if x[0] == '-o': - outdir = x[1] - -if not os.path.isdir (outdir): - if os.path.exists (outdir): - os.unlink (outdir) - os.makedirs (outdir) - -include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M) -whitespaces = re.compile (r'\s+') -section_translation_re = re.compile ('^@(node|(?:unnumbered|appendix)\ -(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|\ -(?:major|chap|(?:sub){0,2})heading|translationof) (.*?)\\s*$', re.MULTILINE) - -def expand_includes (m, filename): - filepath = os.path.join (os.path.dirname (filename), m.group(1)) + '.texi' - if os.path.exists (filepath): - return extract_sections (filepath)[1] - else: - print "Unable to locate include file " + filepath - return '' - -lang_re = re.compile (r'^@documentlanguage (.+)', re.M) - -def extract_sections (filename): - result = '' - f = open (filename, 'r') - page = f.read () - f.close() - # Search document language - m = lang_re.search (page) - if m and m.group (1) != 'en': - lang_suffix = '.' 
+ m.group (1) - else: - lang_suffix = '' - # Replace all includes by their list of sections and extract all sections - page = include_re.sub (lambda m: expand_includes (m, filename), page) - sections = section_translation_re.findall (page) - for sec in sections: - result += "@" + sec[0] + " " + sec[1] + "\n" - return (lang_suffix, result) - -# Convert a given node name to its proper file name (normalization as explained -# in the texinfo manual: -# http://www.gnu.org/software/texinfo/manual/texinfo/html_node/HTML-Xref-Node-Name-Expansion.html -def texinfo_file_name(title): - # exception: The top node is always mapped to index.html - if title == "Top": - return "index" - # File name normalization by texinfo (described in the texinfo manual): - # 1/2: letters and numbers are left unchanged - # 3/4: multiple, leading and trailing whitespace is removed - title = title.strip (); - title = whitespaces.sub (' ', title) - # 5: all remaining spaces are converted to '-' - # 6: all other 7- or 8-bit chars are replaced by _xxxx (xxxx=ascii character code) - result = '' - for index in range(len(title)): - char = title[index] - if char == ' ': # space -> '-' - result += '-' - elif ( ('0' <= char and char <= '9' ) or - ('A' <= char and char <= 'Z' ) or - ('a' <= char and char <= 'z' ) ): # number or letter - result += char - else: - ccode = ord(char) - if ccode <= 0xFFFF: - result += "_%04x" % ccode - else: - result += "__%06x" % ccode - # 7: if name begins with number, prepend 't_g' (so it starts with a letter) - if (result != '') and (ord(result[0]) in range (ord('0'), ord('9'))): - result = 't_g' + result - return result - -texinfo_re = re.compile (r'@.*{(.*)}') -def remove_texinfo (title): - return texinfo_re.sub (r'\1', title) - -def create_texinfo_anchor (title): - return texinfo_file_name (remove_texinfo (title)) - -unnumbered_re = re.compile (r'unnumbered.*') -def process_sections (filename, lang_suffix, page): - sections = section_translation_re.findall (page) - basename 
= os.path.splitext (os.path.basename (filename))[0] - p = os.path.join (outdir, basename) + lang_suffix + '.xref-map' - f = open (p, 'w') - - this_title = '' - this_filename = 'index' - this_anchor = '' - this_unnumbered = False - had_section = False - for sec in sections: - if sec[0] == "node": - # Write out the cached values to the file and start a new section: - if this_title != '' and this_title != 'Top': - f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n") - had_section = False - this_title = remove_texinfo (sec[1]) - this_anchor = create_texinfo_anchor (sec[1]) - elif sec[0] == "translationof": - anchor = create_texinfo_anchor (sec[1]) - # If @translationof is used, it gives the original node name, which - # we use for the anchor and the file name (if it is a numbered node) - this_anchor = anchor - if not this_unnumbered: - this_filename = anchor - else: - # Some pages might not use a node for every section, so treat this - # case here, too: If we already had a section and encounter enother - # one before the next @node, we write out the old one and start - # with the new values - if had_section and this_title != '': - f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n") - this_title = remove_texinfo (sec[1]) - this_anchor = create_texinfo_anchor (sec[1]) - had_section = True - - # unnumbered nodes use the previously used file name, only numbered - # nodes get their own filename! However, top-level @unnumbered - # still get their own file. 
- this_unnumbered = unnumbered_re.match (sec[0]) - if not this_unnumbered or sec[0] == "unnumbered": - this_filename = this_anchor - - if this_title != '' and this_title != 'Top': - f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n") - f.close () - - -for filename in files: - print "extract_texi_filenames.py: Processing %s" % filename - (lang_suffix, sections) = extract_sections (filename) - process_sections (filename, lang_suffix, sections) diff --git a/buildscripts/find-superfluous-includes.py b/buildscripts/find-superfluous-includes.py deleted file mode 100644 index ded1087da7..0000000000 --- a/buildscripts/find-superfluous-includes.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python -import sys -import re -import os - - -full_paths = {} -incs = {} -inc_re = re.compile ('^#include "([^"]+)"') -def parse_file (fn): - lst = [] - - lc = 0 - for l in open (fn).readlines(): - lc += 1 - m = inc_re.search (l) - if m: - lst.append ((lc, m.group (1))) - - base = os.path.split (fn)[1] - full_paths[base] = fn - incs[base] = lst - - -def has_include (f, name): - try: - return name in [b for (a,b) in incs[f]] - except KeyError: - return False - -for a in sys.argv: - parse_file (a) - -print '-*-compilation-*-' -for (f, lst) in incs.items (): - for (n, inc) in lst: - for (n2, inc2) in lst: - if has_include (inc2, inc): - print "%s:%d: already have %s from %s" % (full_paths[f], n, - inc, inc2) - break - - - diff --git a/buildscripts/fixcc.py b/buildscripts/fixcc.py deleted file mode 100644 index 3cc162b8a2..0000000000 --- a/buildscripts/fixcc.py +++ /dev/null @@ -1,625 +0,0 @@ -#!/usr/bin/python - -# fixcc -- nitpick lily's c++ code - -# TODO -# * maintainable rules: regexp's using whitespace (?x) and match names -# ) -# * trailing `*' vs. function definition -# * do not break/change indentation of fixcc-clean files -# * check lexer, parser -# * rewrite in elisp, add to cc-mode -# * using regexes is broken by design -# * ? 
-# * profit - -import __main__ -import getopt -import os -import re -import string -import sys -import time - -COMMENT = 'COMMENT' -STRING = 'STRING' -GLOBAL_CXX = 'GC++' -CXX = 'C++' -verbose_p = 0 -indent_p = 0 - -rules = { - GLOBAL_CXX: - [ - # delete gratuitous block - ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''', - '\n\\2;'), - ], - CXX: - [ - # space before parenthesis open - ('([^\( \]])[ \t]*\(', '\\1 ('), - # space after comma - ("\([^'],\)[ \t]*", '\1 '), - # delete gratuitous block - ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''', - '\n\\2;'), - # delete inline tabs - ('(\w)\t+', '\\1 '), - # delete inline double spaces - (' *', ' '), - # delete space after parenthesis open - ('\([ \t]*', '('), - # delete space before parenthesis close - ('[ \t]*\)', ')'), - # delete spaces after prefix - ('(--|\+\+)[ \t]*([\w\)])', '\\1\\2'), - # delete spaces before postfix - ('([\w\)\]])[ \t]*(--|\+\+)', '\\1\\2'), - # delete space after parenthesis close - #('\)[ \t]*([^\w])', ')\\1'), - # delete space around operator - # ('([\w\(\)\]])([ \t]*)(::|\.)([ \t]*)([\w\(\)])', '\\1\\3\\5'), - ('([\w\(\)\]])([ \t]*)(\.|->)([ \t]*)([\w\(\)])', '\\1\\3\\5'), - # delete space after operator - ('(::)([ \t]*)([\w\(\)])', '\\1\\3'), - # delete superflous space around operator - ('([\w\(\)\]])([ \t]+)(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&|\||\*)([ \t]+)([\w\(\)])', '\\1 \\3 \\5'), - # space around operator1 - ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'), - # space around operator2 - ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) ([^\w\s])', '\\1 \\2 \\3'), - # space around operator3 - ('([^\w\s]) (&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|[^-]>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'), - # space around operator4 - ('([\w\(\)\]]) (\*|/|\+|-) *([-:])', '\\1 \\2 \\3'), - # space around +/-; exponent - 
('([\w\)\]])(\+|-)([_A-Za-z\(])', '\\1 \\2 \\3'), - ('([_\dA-Za-df-z\)\]])(\+|-)([\w\(])', '\\1 \\2 \\3'), - # trailing operator - (' (::|&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&XXX|\||\*XXX)[ \t]*\n([ \t]*)', '\n\\2\\1 '), - # pointer - ##('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'), - ('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|:|=|\?\)|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'), - #to#('(bool|char|const|delete|int|stream|unsigned|void|([A-Z]\w*)|[,])[ \n\t]*(\*|&)[ \t]*', '\\1 \\3'), - # pointer with template - ('(( *((bool|char|const|delete|int|stream|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*|\w+::\w+|[,])[ \*&],*)+)>) *(\*|&) *', '\\1 \\5'), - #to#('(( *((bool|char|delete|int|stream|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)|[,])[ \*&],*)+)>)[ \t\n]*(\*|&) *', '\\1 \\7'), - # unary pointer, minus, not - ('(return|=) (\*|&|-|!) ([\w\(])', '\\1 \\2\\3'), - # space after `operator' - ('(\Woperator) *([^\w\s])', '\\1 \\2'), - # dangling brace close - ('\n[ \t]*(\n[ \t]*})', '\\1'), - # dangling newline - ('\n[ \t]*\n[ \t]*\n', '\n\n'), - # dangling parenthesis open - #('[ \t]*\n[ \t]*\([ \t]*\n', '('), - ('\([ \t]*\n', '('), - # dangling parenthesis close - ('\n[ \t]*\)', ')'), - # dangling comma - ('\n[ \t]*,', ','), - # dangling semicolon - ('\n[ \t]*;', ';'), - # brace open - ('(\w)[ \t]*([^\s]*){([ \t]*\n)', '\\1\\2\n{\n'), - # brace open backslash - ('(\w[^\n]*){[ \t]*\\\\\n', '\\1\\\n{\\\n'), - # brace close - ("}[ \t]*([^'\n]*\w[^\n\\\]*)\n", '}\n\\1\n'), - # brace close backslash - ("}[ \t]*([^'\n]*\w[^\n\\\]*)", '\n}\n\\1'), - # delete space after `operator' - #('(\Woperator) (\W)', '\\1\\2'), - # delete space after case, label - ('(\W(case|label) ([\w]+)) :', '\\1:'), - # delete space before comma - ('[ \t]*,', ','), - # delete space before semicolon - ('[ \t]*;', ';'), - # delete space before 
eol-backslash - ('[ \t]*\\\\\n', '\\\n'), - # delete trailing whitespace - ('[ \t]*\n', '\n'), - - ## Deuglify code that also gets ugly by rules above. - # delete newline after typedef struct - ('(typedef struct\s+([\w]*\s){([^}]|{[^}]*})*})\s*\n\s*(\w[\w\d]*;)', '\\1 \\4'), - # delete spaces around template brackets - #('(dynamic_cast|template|([A-Z]\w*))[ \t]*<[ \t]*(( *(bool|char|int|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)),?)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\3\\8>'), - ('(dynamic_cast|template|typedef|\w+::\w+|[A-Z]\w*)[ \t]*<[ \t]*(( *(bool|char|const|int|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*)( *[\*&]?,|[\*&])*)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\2\\6>'), - ('(\w+::\w+|[A-Z]\w*) < ((\w+::\w+|[A-Z]\w*)<[A-Z]\w*>) >', '\\1<\\2 >'), - ('((if|while)\s+\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\n;'), - ('(for\s+\(([^;]*;[^;]*;([^\)]|\([^\)]*\))*)\))\s*;', '\\1\n;'), - # do {..} while - ('(}\s*while\s*)(\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\\2;'), - - ## Fix code that gets broken by rules above. - ##('->\s+\*', '->*'), - # delete space before #define x() - ('#[ \t]*define (\w*)[ \t]*\(', '#define \\1('), - # add space in #define x () - ('#[ \t]*define (\w*)(\(([^\(\)]|\([^\(\)]*\))*\)\\n)', - '#define \\1 \\2'), - # delete space in #include <> - ('#[ \t]*include[ \t]*<[ \t]*([^ \t>]*)[ \t]*(/?)[ \t]*([^ \t>]*)[ \t]*>', - '#include <\\1\\2\\3>'), - # delete backslash before empty line (emacs' indent region is broken) - ('\\\\\n\n', '\n\n'), - ], - - COMMENT: - [ - # delete trailing whitespace - ('[ \t]*\n', '\n'), - # delete empty first lines - ('(/\*\n)\n*', '\\1'), - # delete empty last lines - ('\n*(\n\*/)', '\\1'), - ## delete newline after start? - #('/(\*)\n', '\\1'), - ## delete newline before end? - #('\n(\*/)', '\\1'), - ], - } - -# Recognize special sequences in the input. -# -# (?Pregex) -- Assign result of REGEX to NAME. -# *? -- Match non-greedily. -# (?m) -- Multiline regex: Make ^ and $ match at each line. 
-# (?s) -- Make the dot match all characters including newline. -# (?x) -- Ignore whitespace in patterns. -no_match = 'a\ba' -snippet_res = { - CXX: { - 'multiline_comment': - r'''(?sx) - (?P - (?P - [ \t]*/\*.*?\*/))''', - - 'singleline_comment': - r'''(?mx) - ^.* - (?P - (?P - [ \t]*//([ \t][^\n]*|)\n))''', - - 'string': - r'''(?x) - (?P - (?P - "([^\"\n](\")*)*"))''', - - 'char': - r'''(?x) - (?P - (?P - '([^']+|\')))''', - - 'include': - r'''(?x) - (?P - (?P - "#[ \t]*include[ \t]*<[^>]*>''', - }, - } - -class Chunk: - def replacement_text (self): - return '' - - def filter_text (self): - return self.replacement_text () - -class Substring (Chunk): - def __init__ (self, source, start, end): - self.source = source - self.start = start - self.end = end - - def replacement_text (self): - s = self.source[self.start:self.end] - if verbose_p: - sys.stderr.write ('CXX Rules') - for i in rules[CXX]: - if verbose_p: - sys.stderr.write ('.') - #sys.stderr.write ('\n\n***********\n') - #sys.stderr.write (i[0]) - #sys.stderr.write ('\n***********\n') - #sys.stderr.write ('\n=========>>\n') - #sys.stderr.write (s) - #sys.stderr.write ('\n<<=========\n') - s = re.sub (i[0], i[1], s) - if verbose_p: - sys.stderr.write ('done\n') - return s - - -class Snippet (Chunk): - def __init__ (self, type, match, format): - self.type = type - self.match = match - self.hash = 0 - self.options = [] - self.format = format - - def replacement_text (self): - return self.match.group ('match') - - def substring (self, s): - return self.match.group (s) - - def __repr__ (self): - return `self.__class__` + ' type = ' + self.type - -class Multiline_comment (Snippet): - def __init__ (self, source, match, format): - self.type = type - self.match = match - self.hash = 0 - self.options = [] - self.format = format - - def replacement_text (self): - s = self.match.group ('match') - if verbose_p: - sys.stderr.write ('COMMENT Rules') - for i in rules[COMMENT]: - if verbose_p: - sys.stderr.write ('.') - s = 
re.sub (i[0], i[1], s) - return s - -snippet_type_to_class = { - 'multiline_comment': Multiline_comment, -# 'string': Multiline_comment, -# 'include': Include_snippet, -} - -def find_toplevel_snippets (s, types): - if verbose_p: - sys.stderr.write ('Dissecting') - - res = {} - for i in types: - res[i] = re.compile (snippet_res[format][i]) - - snippets = [] - index = 0 - ## found = dict (map (lambda x: (x, None), - ## types)) - ## urg python2.1 - found = {} - map (lambda x, f = found: f.setdefault (x, None), - types) - - # We want to search for multiple regexes, without searching - # the string multiple times for one regex. - # Hence, we use earlier results to limit the string portion - # where we search. - # Since every part of the string is traversed at most once for - # every type of snippet, this is linear. - - while 1: - if verbose_p: - sys.stderr.write ('.') - first = None - endex = 1 << 30 - for type in types: - if not found[type] or found[type][0] < index: - found[type] = None - m = res[type].search (s[index:endex]) - if not m: - continue - - cl = Snippet - if snippet_type_to_class.has_key (type): - cl = snippet_type_to_class[type] - snip = cl (type, m, format) - start = index + m.start ('match') - found[type] = (start, snip) - - if found[type] \ - and (not first \ - or found[type][0] < found[first][0]): - first = type - - # FIXME. - - # Limiting the search space is a cute - # idea, but this *requires* to search - # for possible containing blocks - # first, at least as long as we do not - # search for the start of blocks, but - # always/directly for the entire - # @block ... @end block. 
- - endex = found[first][0] - - if not first: - snippets.append (Substring (s, index, len (s))) - break - - (start, snip) = found[first] - snippets.append (Substring (s, index, start)) - snippets.append (snip) - found[first] = None - index = start + len (snip.match.group ('match')) - - return snippets - -def nitpick_file (outdir, file): - s = open (file).read () - - for i in rules[GLOBAL_CXX]: - s = re.sub (i[0], i[1], s) - - # FIXME: Containing blocks must be first, see - # find_toplevel_snippets. - # We leave simple strings be part of the code - snippet_types = ( - 'multiline_comment', - 'singleline_comment', - 'string', -# 'char', - ) - - chunks = find_toplevel_snippets (s, snippet_types) - #code = filter (lambda x: is_derived_class (x.__class__, Substring), - # chunks) - - t = string.join (map (lambda x: x.filter_text (), chunks), '') - fixt = file - if s != t: - if not outdir: - os.system ('mv %s %s~' % (file, file)) - else: - fixt = os.path.join (outdir, - os.path.basename (file)) - h = open (fixt, "w") - h.write (t) - h.close () - if s != t or indent_p: - indent_file (fixt) - -def indent_file (file): - emacs = '''emacs\ - --no-window-system\ - --batch\ - --no-site-file\ - --no-init-file\ - %(file)s\ - --eval '(let ((error nil) - (version-control nil)) - (load-library "cc-mode") - (c++-mode) - (indent-region (point-min) (point-max)) - (if (buffer-modified-p (current-buffer)) - (save-buffer)))' ''' % vars () - emacsclient = '''emacsclient\ - --socket-name=%(socketdir)s/%(socketname)s\ - --no-wait\ - --eval '(let ((error nil) - (version-control nil)) - (load-library "cc-mode") - (find-file "%(file)s") - (c++-mode) - (indent-region (point-min) (point-max)) - (if (buffer-modified-p (current-buffer)) - (save-buffer)))' ''' \ - % { 'file': file, - 'socketdir' : socketdir, - 'socketname' : socketname, } - if verbose_p: - sys.stderr.write (emacs) - sys.stderr.write ('\n') - os.system (emacs) - - -def usage (): - sys.stdout.write (r''' -Usage: -fixcc [OPTION]... 
FILE... - -Options: - --help - --indent reindent, even if no changes - --verbose - --test - -Typical use with LilyPond: - - fixcc $(find flower kpath-guile lily -name '*cc' -o -name '*hh' | grep -v /out) - -This script is licensed under the GNU GPL -''') - -def do_options (): - global indent_p, outdir, verbose_p - (options, files) = getopt.getopt (sys.argv[1:], '', - ['help', 'indent', 'outdir=', - 'test', 'verbose']) - for (o, a) in options: - if o == '--help': - usage () - sys.exit (0) - elif o == '--indent': - indent_p = 1 - elif o == '--outdir': - outdir = a - elif o == '--verbose': - verbose_p = 1 - elif o == '--test': - test () - sys.exit (0) - else: - assert unimplemented - if not files: - usage () - sys.exit (2) - return files - - -outdir = 0 -format = CXX -socketdir = '/tmp/fixcc' -socketname = 'fixcc%d' % os.getpid () - -def setup_client (): - #--no-window-system\ - #--batch\ - os.unlink (os.path.join (socketdir, socketname)) - os.mkdir (socketdir, 0700) - emacs='''emacs\ - --no-site-file\ - --no-init-file\ - --eval '(let ((error nil) - (version-control nil)) - (load-library "server") - (setq server-socket-dir "%(socketdir)s") - (setq server-name "%(socketname)s") - (server-start) - (while t) (sleep 1000))' ''' \ - % { 'socketdir' : socketdir, - 'socketname' : socketname, } - - if not os.fork (): - os.system (emacs) - sys.exit (0) - while not os.path.exists (os.path.join (socketdir, socketname)): - time.sleep (1) - -def main (): - #emacsclient should be faster, but this does not work yet - #setup_client () - files = do_options () - if outdir and not os.path.isdir (outdir): - os.makedirs (outdir) - for i in files: - sys.stderr.write ('%s...\n' % i) - nitpick_file (outdir, i) - - -## TODO: make this compilable and check with g++ -TEST = ''' -#include -#include -class -ostream ; - -class Foo { -public: static char* foo (); -std::map* bar (char, char) { return 0; } -}; -typedef struct -{ - Foo **bar; -} String; - -ostream & -operator << (ostream & os, String 
d); - -typedef struct _t_ligature -{ - char *succ, *lig; - struct _t_ligature * next; -} AFM_Ligature; - -typedef std::map < AFM_Ligature const *, int > Bar; - - /** - (c) 1997--2008 Han-Wen Nienhuys - */ - -/* || -* vv -* !OK OK -*/ -/* || - vv - !OK OK -*/ -char * -Foo:: foo () -{ -int -i -; - char* a= &++ i ; - a [*++ a] = (char*) foe (*i, &bar) * - 2; - int operator double (); - std::map y =*bar(-*a ,*b); - Interval_t & operator*= (T r); - Foo*c; - int compare (Pqueue_ent < K, T > const& e1, Pqueue_ent < K,T> *e2); - delete *p; - if (abs (f)*2 > abs (d) *FUDGE) - ; - while (0); - for (; ibar); - for (; *p && > y; - foo > bar) -; - do { - ;;; - } - while (foe); - - squiggle. extent; - 1 && * unsmob_moment (lf); - line_spanner_ = make_spanner ("DynamicLineSpanner", rq ? rq->*self_scm -(): SCM_EOL); - case foo: k; - - if (0) {a=b;} else { - c=d; - } - - cookie_io_functions_t Memory_out_stream::functions_ = { - Memory_out_stream::reader, - ... - }; - - int compare (Array < Pitch> *, Array < Pitch> *); - original_ = (Grob *) & s; - Drul_array< Link_array > o; -} - - header_.char_info_pos = (6 + header_length) * 4; - return ly_bool2scm (*ma < * mb); - - 1 *::sign(2); - - (shift) *-d; - - a = 0 ? 
*x : *y; - -a = "foo() 2,2,4"; -{ - if (!span_) - { - span_ = make_spanner ("StaffSymbol", SCM_EOL); - } -} -{ - if (!span_) - { - span_ = make_spanner (StaffSymbol, SCM_EOL); - } -} -''' - -def test (): - test_file = 'fixcc.cc' - open (test_file, 'w').write (TEST) - nitpick_file (outdir, test_file) - sys.stdout.write (open (test_file).read ()) - -if __name__ == '__main__': - main () - diff --git a/buildscripts/gen-emmentaler-scripts.py b/buildscripts/gen-emmentaler-scripts.py deleted file mode 100644 index 3da8840869..0000000000 --- a/buildscripts/gen-emmentaler-scripts.py +++ /dev/null @@ -1,104 +0,0 @@ -#!@PYTHON@ -import sys -import getopt -import re -import os - -(options, files) = \ - getopt.getopt (sys.argv[1:], - '', - ['dir=']) - - -outdir = '' -for opt in options: - o = opt[0] - a = opt[1] - if o == '--dir': - outdir = a - else: - print o - raise getopt.error - -# Ugh -for design_size in [11,13,14,16,18,20,23,26]: - name = 'Emmentaler' - filename = name.lower () - script = '''#!@FONTFORGE@ - -New(); - -# Separate Feta versioning? -# * using 20 as Weight works for gnome-font-select widget: gfs - -notice = ""; -notice += "This font is distributed under the GNU General Public License. "; -notice += "As a special exception, if you create a document which uses "; -notice += "this font, and embed this font or unaltered portions of this "; -notice += "font into the document, this font does not by itself cause the "; -notice += "resulting document to be covered by the GNU General Public License.";; - -SetFontNames("%(name)s-%(design_size)d", "%(name)s", "%(name)s %(design_size)d", "%(design_size)d", notice, "@TOPLEVEL_VERSION@"); - -MergeFonts("feta%(design_size)d.pfb"); -MergeFonts("parmesan%(design_size)d.pfb"); - -# load nummer/din after setting PUA. 
-i = 0; -while (i < CharCnt()) - Select(i); -# crashes fontforge, use PUA for now -- jcn -# SetUnicodeValue(i + 0xF0000, 0); -/* -PRIVATE AREA - In the BMP, the range 0xe000 to 0xf8ff will never be assigned to any - characters by the standard and is reserved for private usage. For the - Linux community, this private area has been subdivided further into the - range 0xe000 to 0xefff which can be used individually by any end-user - and the Linux zone in the range 0xf000 to 0xf8ff where extensions are - coordinated among all Linux users. The registry of the characters - assigned to the Linux zone is currently maintained by H. Peter Anvin - . -*/ - SetUnicodeValue(i + 0xE000, 0); - ++i; -endloop - - -MergeFonts("feta-alphabet%(design_size)d.pfb"); -MergeKern("feta-alphabet%(design_size)d.tfm"); - -LoadTableFromFile("LILF", "%(filename)s-%(design_size)d.subfonts"); -LoadTableFromFile("LILC", "feta%(design_size)d.otf-table"); -LoadTableFromFile("LILY", "feta%(design_size)d.otf-gtable"); - -Generate("%(filename)s-%(design_size)d.otf"); -Generate("%(filename)s-%(design_size)d.svg"); -''' % vars() - - basename = '%s-%d' % (filename, design_size) - path = os.path.join (outdir, basename + '.pe') - open (path, 'w').write (script) - - subfonts = ['feta%(design_size)d', - 'parmesan%(design_size)d', - 'feta-alphabet%(design_size)d'] - - ns = [] - for s in subfonts: - ns.append ('%s' % (s % vars())) - - subfonts_str = ' '.join (ns) - - open (os.path.join (outdir, '%(filename)s-%(design_size)d.subfonts' % vars()), 'w').write (subfonts_str) - - path = os.path.join (outdir, '%s-%d.dep' % (filename, design_size)) - - deps = r'''%(filename)s-%(design_size)d.otf: $(outdir)/feta%(design_size)d.pfa \ - $(outdir)/parmesan%(design_size)d.pfa \ - $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-table \ - $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-gtable -''' % vars() - open (path, 'w').write (deps) - - open (os.path.join (outdir, basename + 
'.fontname'), 'w').write ("%s-%d" % (name, design_size)) diff --git a/buildscripts/genicon.py b/buildscripts/genicon.py deleted file mode 100644 index 543735240f..0000000000 --- a/buildscripts/genicon.py +++ /dev/null @@ -1,31 +0,0 @@ -#!@PYTHON@ -import os -import sys -import tempfile - -base = os.path.splitext (os.path.split (sys.argv[1])[1])[0] -input = os.path.abspath (sys.argv[1]) -output = os.path.abspath (sys.argv[2]) -program_name= os.path.split (sys.argv[0])[1] - -dir = tempfile.mktemp (program_name) -os.mkdir (dir, 0777) -os.chdir(dir) - -def system (c): - print c - if os.system (c): - raise 'barf' - -outputs = [] -for sz in [48,32,16] : - - for depth in [24,8]: - out = '%(base)s-%(sz)d-%(depth)d.png' % locals() - system ('convert -depth %(depth)d -sample %(sz)d %(input)s %(out)s' % - locals ()) - outputs.append (out) - -system('icotool --output %s --create %s' % (output, ' '.join (outputs))) -system('rm -rf %(dir)s' % locals()) - diff --git a/buildscripts/git-update-changelog.py b/buildscripts/git-update-changelog.py deleted file mode 100644 index 09f0d16b7a..0000000000 --- a/buildscripts/git-update-changelog.py +++ /dev/null @@ -1,311 +0,0 @@ -#!/usr/bin/python - -import sys -import time -import os -import re -import optparse - -def read_pipe (x): - print 'pipe', x - return os.popen (x).read () - -def system (x): - print x - return os.system (x) - -class PatchFailed(Exception): - pass - -def sign (x): - if x < 0: - return -1 - if x > 0: - return 1 - - return 0 - - -class Commit: - def __init__ (self, dict): - for v in ('message', - 'date', - 'author', - 'committish'): - self.__dict__[v] = dict[v] - - self.date = ' '.join (self.date.split (' ')[:-1]) - self.date = time.strptime (self.date, '%a %b %d %H:%M:%S %Y') - - m = re.search ('(.*)<(.*)>', self.author) - self.email = m.group (2).strip () - self.name = m.group (1).strip () - self.diff = read_pipe ('git show %s' % self.committish) - def compare (self, other): - return sign (time.mktime (self.date) - 
time.mktime (other.date)) - - - def check_diff_chunk (self, filename, chunk): - removals = [] - def note_removal (m): - removals.append (m.group (1)) - - re.sub ('\n-([^\n]+)', note_removal, chunk) - - if removals == []: - return True - if not os.path.exists (filename): - return False - - contents = open (filename).read () - for r in removals: - if r not in contents: - return False - - return True - - def check_diff (self): - chunks = re.split ('\ndiff --git ', self.diff) - - ok = True - for c in chunks: - m = re.search ('^a/([^ ]+)', c) - if not m: - continue - - file = m.group (1) - - c = re.sub('\n--- [^\n]+', '', c) - ok = ok and self.check_diff_chunk (file, c) - if not ok: - break - - return ok - - def touched_files (self): - files = [] - def note_file (x): - files.append (x.group (1)) - return '' - - re.sub ('\n--- a/([^\n]+)\n', - note_file, self.diff) - re.sub('\n--- /dev/null\n\\+\\+\\+ b/([^\n]+)', - note_file, self.diff) - - return files - - def has_patch (self): - return self.touched_files () <> [] - - def apply (self, add_del_files): - def note_add_file (x): - add_del_files.append (('add', x.group (1))) - return '' - - def note_del_file (x): - add_del_files.append (('del', x.group (1))) - return '' - - re.sub('\n--- /dev/null\n\\+\\+\\+ b/([^\n]+)', - note_add_file, self.diff) - - re.sub('\n--- a/([^\n]+)\n\\+\\+\\+ /dev/null', - note_del_file, self.diff) - - p = os.popen ('patch -f -p1 ', 'w') - p.write (self.diff) - - if p.close (): - raise PatchFailed, self.committish - - -def parse_commit_log (log): - committish = re.search ('^([^\n]+)', log).group (1) - author = re.search ('\nAuthor:\s+([^\n]+)', log).group (1) - date_match = re.search ('\nDate:\s+([^\n]+)', log) - date = date_match.group (1) - log = log[date_match.end (1):] - - message = re.sub ("\n *", '', log) - message = message.strip () - - c = Commit (locals ()) - return c - -def parse_add_changes (from_commit, max_count=0): - opt = '' - rest = '..' - if max_count: - - # fixme. 
- assert max_count == 1 - opt = '--max-count=%d' % max_count - rest = '' - - log = read_pipe ('git log %(opt)s %(from_commit)s%(rest)s' % locals ()) - - log = log[len ('commit '):] - log = log.strip () - - if not log: - return [] - - commits = map (parse_commit_log, re.split ('\ncommit ', log)) - commits.reverse () - - return commits - - -def header (commit): - return '%d-%02d-%02d %s <%s>\n' % (commit.date[:3] + (commit.name, commit.email)) - -def changelog_body (commit): - s = '' - s += ''.join ('\n* %s: ' % f for f in commit.touched_files()) - s += '\n' + commit.message - - s = s.replace ('\n', '\n\t') - s += '\n' - return s - -def main (): - p = optparse.OptionParser (usage="usage git-update-changelog.py [options] [commits]", - description=""" -Apply GIT patches and update change log. - -Run this file from the CVS directory, with commits from the repository in --git-dir. - -""") - p.add_option ("--start", - action='store', - default='', - metavar="FIRST", - dest="start", - help="all commits starting with FIRST (exclusive).") - - p.add_option ("--git-dir", - action='store', - default='', - dest="gitdir", - help="the GIT directory to merge.") - - (options, args) = p.parse_args () - - log = open ('ChangeLog').read () - - if options.gitdir: - os.environ['GIT_DIR'] = options.gitdir - - - if not args: - if not options.start: - print 'Must set start committish.' 
- sys.exit (1) - - commits = parse_add_changes (options.start) - else: - commits = [] - for a in args: - commits += parse_add_changes (a, max_count=1) - - if not commits: - return - - new_log = '' - last_commit = None - - first = header (commits[0]) + '\n' - if first == log[:len (first)]: - log = log[len (first):] - - try: - previously_done = dict((c, 1) for c in open ('.git-commits-done').read ().split ('\n')) - except IOError: - previously_done = {} - - commits = [c for c in commits if not previously_done.has_key (c.committish)] - commits = sorted (commits, cmp=Commit.compare) - - system ('cvs up') - - file_adddel = [] - collated_log = '' - collated_message = '' - commits_done = [] - while commits: - c = commits[0] - - if not c.has_patch (): - print 'patchless commit (merge?)' - continue - - ok = c.check_diff () - - if not ok: - print "Patch doesn't seem to apply" - print 'skipping', c.committish - print 'message:', c.message - - break - - - commits = commits[1:] - commits_done.append (c) - - print 'patch ', c.committish - try: - c.apply (file_adddel) - except PatchFailed: - break - - if c.touched_files () == ['ChangeLog']: - continue - - if (last_commit - and c.author != last_commit.author - and c.date[:3] != last_commit.date[:3]): - - new_log += header (last_commit) - - collated_log = changelog_body (c) + collated_log - last_commit = c - - collated_message += c.message + '\n' - - - - for (op, f) in file_adddel: - if op == 'del': - system ('cvs remove %(f)s' % locals ()) - if op == 'add': - system ('cvs add %(f)s' % locals ()) - - if last_commit: - collated_log = header (last_commit) + collated_log + '\n' - - log = collated_log + log - - try: - os.unlink ('ChangeLog~') - except OSError: - pass - - os.rename ('ChangeLog', 'ChangeLog~') - open ('ChangeLog', 'w').write (log) - - open ('.msg','w').write (collated_message) - print '\nCommit message\n**\n%s\n**\n' % collated_message - print '\nRun:\n\n\tcvs commit -F .msg\n\n' - print '\n\techo %s >> 
.git-commits-done\n\n' % ' '.join ([c.committish - for c in commits_done]) - - - if commits: - print 'Commits left to do:' - print ' '.join ([c.committish for c in commits]) - -main () - - - diff --git a/buildscripts/grand-replace.sh b/buildscripts/grand-replace.sh deleted file mode 100644 index 645eae6ea8..0000000000 --- a/buildscripts/grand-replace.sh +++ /dev/null @@ -1,5 +0,0 @@ -#! @BASH@ -# note: dash does not work - -pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*2007' '\1 2007--2008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change') -pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*([^-]*--)(200[0-7])' '\1 \2\062008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change') diff --git a/buildscripts/help2man.pl b/buildscripts/help2man.pl deleted file mode 100644 index 9cb09c4859..0000000000 --- a/buildscripts/help2man.pl +++ /dev/null @@ -1,559 +0,0 @@ -#!@PERL@ -w - -# Generate a short man page from --help and --version output. -# Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software -# Foundation, Inc. - -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2, or (at your option) -# any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. - -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
- -# Written by Brendan O'Dea -# Available from ftp://ftp.gnu.org/gnu/help2man/ - -use 5.005; -use strict; -use Getopt::Long; -use Text::Tabs qw(expand); -use POSIX qw(strftime setlocale LC_TIME); - -my $this_program = 'help2man'; -my $this_version = '1.28'; -my $version_info = < -EOT - -my $help_info = <. -EOT - -my $section = 1; -my $manual = ''; -my $source = ''; -my $help_option = '--help'; -my $version_option = '--version'; -my ($opt_name, @opt_include, $opt_output, $opt_info, $opt_no_info); - -my %opt_def = ( - 'n|name=s' => \$opt_name, - 's|section=s' => \$section, - 'm|manual=s' => \$manual, - 'S|source=s' => \$source, - 'i|include=s' => sub { push @opt_include, [ pop, 1 ] }, - 'I|opt-include=s' => sub { push @opt_include, [ pop, 0 ] }, - 'o|output=s' => \$opt_output, - 'p|info-page=s' => \$opt_info, - 'N|no-info' => \$opt_no_info, - 'h|help-option=s' => \$help_option, - 'v|version-option=s' => \$version_option, -); - -# Parse options. -Getopt::Long::config('bundling'); -GetOptions (%opt_def, - help => sub { print $help_info; exit }, - version => sub { print $version_info; exit }, -) or die $help_info; - -die $help_info unless @ARGV == 1; - -my %include = (); -my %append = (); -my @include = (); # retain order given in include file - -# Process include file (if given). Format is: -# -# [section name] -# verbatim text -# -# or -# -# /pattern/ -# verbatim text -# - -while (@opt_include) -{ - my ($inc, $required) = @{shift @opt_include}; - - next unless -f $inc or $required; - die "$this_program: can't open `$inc' ($!)\n" - unless open INC, $inc; - - my $key; - my $hash = \%include; - - while () - { - # [section] - if (/^\[([^]]+)\]/) - { - $key = uc $1; - $key =~ s/^\s+//; - $key =~ s/\s+$//; - $hash = \%include; - push @include, $key unless $include{$key}; - next; - } - - # /pattern/ - if (m!^/(.*)/([ims]*)!) - { - my $pat = $2 ? "(?$2)$1" : $1; - - # Check pattern. - eval { $key = qr($pat) }; - if ($@) - { - $@ =~ s/ at .*? 
line \d.*//; - die "$inc:$.:$@"; - } - - $hash = \%append; - next; - } - - # Check for options before the first section--anything else is - # silently ignored, allowing the first for comments and - # revision info. - unless ($key) - { - # handle options - if (/^-/) - { - local @ARGV = split; - GetOptions %opt_def; - } - - next; - } - - $hash->{$key} ||= ''; - $hash->{$key} .= $_; - } - - close INC; - - die "$this_program: no valid information found in `$inc'\n" - unless $key; -} - -# Compress trailing blank lines. -for my $hash (\(%include, %append)) -{ - for (keys %$hash) { $hash->{$_} =~ s/\n+$/\n/ } -} - -# Turn off localisation of executable's output. -@ENV{qw(LANGUAGE LANG LC_ALL)} = ('C') x 3; - -# Turn off localisation of date (for strftime). -setlocale LC_TIME, 'C'; - -# Grab help and version info from executable. -my ($help_text, $version_text) = map { - join '', map { s/ +$//; expand $_ } `$ARGV[0] $_ 2>/dev/null` - or die "$this_program: can't get `$_' info from $ARGV[0]\n" -} $help_option, $version_option; - -my $date = strftime "%B %Y", localtime; -(my $program = $ARGV[0]) =~ s!.*/!!; -my $package = $program; -my $version; - -if ($opt_output) -{ - unlink $opt_output - or die "$this_program: can't unlink $opt_output ($!)\n" - if -e $opt_output; - - open STDOUT, ">$opt_output" - or die "$this_program: can't create $opt_output ($!)\n"; -} - -# The first line of the --version information is assumed to be in one -# of the following formats: -# -# -# -# {GNU,Free} -# ({GNU,Free} ) -# - {GNU,Free} -# -# and seperated from any copyright/author details by a blank line. - -($_, $version_text) = split /\n+/, $version_text, 2; - -if (/^(\S+) +\(((?:GNU|Free) +[^)]+)\) +(.*)/ or - /^(\S+) +- *((?:GNU|Free) +\S+) +(.*)/) -{ - $program = $1; - $package = $2; - $version = $3; -} -elsif (/^((?:GNU|Free) +)?(\S+) +(.*)/) -{ - $program = $2; - $package = $1 ? 
"$1$2" : $2; - $version = $3; -} -else -{ - $version = $_; -} - -$program =~ s!.*/!!; - -# No info for `info' itself. -$opt_no_info = 1 if $program eq 'info'; - -# --name overrides --include contents. -$include{NAME} = "$program \\- $opt_name\n" if $opt_name; - -# Default (useless) NAME paragraph. -$include{NAME} ||= "$program \\- manual page for $program $version\n"; - -# Man pages traditionally have the page title in caps. -my $PROGRAM = uc $program; - -# Set default page head/footers -$source ||= "$program $version"; -unless ($manual) -{ - for ($section) - { - if (/^(1[Mm]|8)/) { $manual = 'System Administration Utilities' } - elsif (/^6/) { $manual = 'Games' } - else { $manual = 'User Commands' } - } -} - -# Extract usage clause(s) [if any] for SYNOPSIS. -if ($help_text =~ s/^Usage:( +(\S+))(.*)((?:\n(?: {6}\1| *or: +\S).*)*)//m) -{ - my @syn = $2 . $3; - - if ($_ = $4) - { - s/^\n//; - for (split /\n/) { s/^ *(or: +)?//; push @syn, $_ } - } - - my $synopsis = ''; - for (@syn) - { - $synopsis .= ".br\n" if $synopsis; - s!^\S*/!!; - s/^(\S+) *//; - $synopsis .= ".B $1\n"; - s/\s+$//; - s/(([][]|\.\.+)+)/\\fR$1\\fI/g; - s/^/\\fI/ unless s/^\\fR//; - $_ .= '\fR'; - s/(\\fI)( *)/$2$1/g; - s/\\fI\\fR//g; - s/^\\fR//; - s/\\fI$//; - s/^\./\\&./; - - $synopsis .= "$_\n"; - } - - $include{SYNOPSIS} ||= $synopsis; -} - -# Process text, initial section is DESCRIPTION. -my $sect = 'DESCRIPTION'; -$_ = "$help_text\n\n$version_text"; - -# Normalise paragraph breaks. -s/^\n+//; -s/\n*$/\n/; -s/\n\n+/\n\n/g; - -# Temporarily exchange leading dots, apostrophes and backslashes for -# tokens. -s/^\./\x80/mg; -s/^'/\x81/mg; -s/\\/\x82/g; - -# Start a new paragraph (if required) for these. -s/([^\n])\n(Report +bugs|Email +bug +reports +to|Written +by)/$1\n\n$2/g; - -sub convert_option; - -while (length) -{ - # Convert some standard paragraph names. 
- if (s/^(Options|Examples): *\n//) - { - $sect = uc $1; - next; - } - - # Copyright section - if (/^Copyright +[(\xa9]/) - { - $sect = 'COPYRIGHT'; - $include{$sect} ||= ''; - $include{$sect} .= ".PP\n" if $include{$sect}; - - my $copy; - ($copy, $_) = split /\n\n/, $_, 2; - - for ($copy) - { - # Add back newline - s/\n*$/\n/; - - # Convert iso9959-1 copyright symbol or (c) to nroff - # character. - s/^Copyright +(?:\xa9|\([Cc]\))/Copyright \\(co/mg; - - # Insert line breaks before additional copyright messages - # and the disclaimer. - s/(.)\n(Copyright |This +is +free +software)/$1\n.br\n$2/g; - - # Join hyphenated lines. - s/([A-Za-z])-\n */$1/g; - } - - $include{$sect} .= $copy; - $_ ||= ''; - next; - } - - # Catch bug report text. - if (/^(Report +bugs|Email +bug +reports +to) /) - { - $sect = 'REPORTING BUGS'; - } - - # Author section. - elsif (/^Written +by/) - { - $sect = 'AUTHOR'; - } - - # Examples, indicated by an indented leading $, % or > are - # rendered in a constant width font. - if (/^( +)([\$\%>] )\S/) - { - my $indent = $1; - my $prefix = $2; - my $break = '.IP'; - $include{$sect} ||= ''; - while (s/^$indent\Q$prefix\E(\S.*)\n*//) - { - $include{$sect} .= "$break\n\\f(CW$prefix$1\\fR\n"; - $break = '.br'; - } - - next; - } - - my $matched = ''; - $include{$sect} ||= ''; - - # Sub-sections have a trailing colon and the second line indented. - if (s/^(\S.*:) *\n / /) - { - $matched .= $& if %append; - $include{$sect} .= qq(.SS "$1"\n); - } - - my $indent = 0; - my $content = ''; - - # Option with description. - if (s/^( {1,10}([+-]\S.*?))(?:( +(?!-))|\n( {20,}))(\S.*)\n//) - { - $matched .= $& if %append; - $indent = length ($4 || "$1$3"); - $content = ".TP\n\x83$2\n\x83$5\n"; - unless ($4) - { - # Indent may be different on second line. - $indent = length $& if /^ {20,}/; - } - } - - # Option without description. 
- elsif (s/^ {1,10}([+-]\S.*)\n//) - { - $matched .= $& if %append; - $content = ".HP\n\x83$1\n"; - $indent = 80; # not continued - } - - # Indented paragraph with tag. - elsif (s/^( +(\S.*?) +)(\S.*)\n//) - { - $matched .= $& if %append; - $indent = length $1; - $content = ".TP\n\x83$2\n\x83$3\n"; - } - - # Indented paragraph. - elsif (s/^( +)(\S.*)\n//) - { - $matched .= $& if %append; - $indent = length $1; - $content = ".IP\n\x83$2\n"; - } - - # Left justified paragraph. - else - { - s/(.*)\n//; - $matched .= $& if %append; - $content = ".PP\n" if $include{$sect}; - $content .= "$1\n"; - } - - # Append continuations. - while (s/^ {$indent}(\S.*)\n//) - { - $matched .= $& if %append; - $content .= "\x83$1\n" - } - - # Move to next paragraph. - s/^\n+//; - - for ($content) - { - # Leading dot and apostrophe protection. - s/\x83\./\x80/g; - s/\x83'/\x81/g; - s/\x83//g; - - # Convert options. - s/(^| )(-[][\w=-]+)/$1 . convert_option $2/mge; - } - - # Check if matched paragraph contains /pat/. - if (%append) - { - for my $pat (keys %append) - { - if ($matched =~ $pat) - { - $content .= ".PP\n" unless $append{$pat} =~ /^\./; - $content .= $append{$pat}; - } - } - } - - $include{$sect} .= $content; -} - -# Refer to the real documentation. 
-unless ($opt_no_info) -{ - my $info_page = $opt_info || $program; - - $sect = 'SEE ALSO'; - $include{$sect} ||= ''; - $include{$sect} .= ".PP\n" if $include{$sect}; - $include{$sect} .= <|)(.*?)(?:|)'), - r'@command{\1}'), - 'texi2html': - (re.compile (r'@command{(.*?)}'), - r'\1'), - }, - 'code': { - 'html2texi': - (re.compile (r'(.*?)'), - r'@code{\1}'), - 'texi2html': - (re.compile (r'@code{(.*?)}'), - r'\1'), - }, - } - -whitespaces = re.compile (r'\s+') - - -def _ (s): - if not s: - return '' - str = whitespaces.sub (' ', s) - for c in html_codes: - str = str.replace (c[1], c[0]) - for command in texi_html_conversion: - d = texi_html_conversion[command] - str = d['html2texi'][0].sub (d['html2texi'][1], str) - str = my_gettext (str) - str = d['texi2html'][0].sub (d['texi2html'][1], str) - for c in html_codes: - str = str.replace (c[0], c[1]) - return str - -link_re = re.compile (r'') - -def link_gettext (m): - return '' - -makeinfo_title_re = re.compile (r'([^<]*?) - ([^<]*?)') - -def makeinfo_title_gettext (m): - return '' + _ (m.group (1)) + ' - ' + m.group (2) + '' - -texi2html_title_re = re.compile (r'(.+): ([A-Z\d.]+ |)(.+?)') - -def texi2html_title_gettext (m): - return '' + _ (m.group (1)) + double_punct_char_separator + ': ' \ - + m.group (2) + _ (m.group (3)) + '' - -a_href_re = re.compile ('(?s)[^>]*?href="[\\w.#-_]+"[^>]*?>)(?P)?\ -(?PAppendix )?(?P[A-Z0-9.]+ | (?:<){1,2} | [^>:]+?: | |)\ -(?P(?:|||[^>])+?)(?P(?(code)|))\ -(?P (?:>){1,2} | |):?') - -def a_href_gettext (m): - s = '' - if m.group(0)[-1] == ':': - s = double_punct_char_separator + ':' - t = '' - if m.group ('appendix'): - t = _ (m.group ('appendix')) - return '' + s - -h_re = re.compile (r'\s*(Appendix |)([A-Z\d.]+ |)(.+?)\s*') - -def h_gettext (m): - if m.group (3): - s = _ (m.group (3)) - else: - s= '' - return '' + s +\ - m.group (4) + _ (m.group (5)) + '' - -for filename in files: - f = open (filename, 'r') - page = f.read () - f.close () - page = link_re.sub (link_gettext, page) - 
page = makeinfo_title_re.sub (makeinfo_title_gettext, page) - page = texi2html_title_re.sub (texi2html_title_gettext, page) - page = a_href_re.sub (a_href_gettext, page) - page = h_re.sub (h_gettext, page) - for w in ('Next:', 'Previous:', 'Up:'): - page = page.replace (w, _ (w)) - page = langdefs.LANGDICT[lang].html_filter (page) - f = open (os.path.join (outdir, filename), 'w') - f.write (page) - f.close () diff --git a/buildscripts/install-info-html.sh b/buildscripts/install-info-html.sh deleted file mode 100644 index a116cd93d0..0000000000 --- a/buildscripts/install-info-html.sh +++ /dev/null @@ -1,157 +0,0 @@ -#!@BASH@ - -name=install-info-html -version=1.0 - -all= -index_dir=. - -# -# debugging -# -debug_echo=: - - -# -# print usage -# -help () -{ - cat << EOF -$name $version -Install HTML info document. - -Usage: $name [OPTIONS]... [DOCUMENT-DIR]... - -Options: - -a, --all assume all subdirectories of index to be DOCUMENT-DIRs - -d, --dir=DIR set index directory to DIR (default=.) - -D, --debug print debugging info - -h, --help show this help text - -v, --version show version -EOF -} - - -cleanup () -{ - $debug_echo "cleaning ($?)..." -} - -trap cleanup 0 9 15 - -# -# Find command line options and switches -# - -# "x:" x takes argument -# -options="adhvW:" -# -# ugh, "\-" is a hack to support long options -# must be in double quotes for bash-2.0 - -while getopts "\-:$options" O -do - $debug_echo "O: \`$O'" - $debug_echo "arg: \`$OPTARG'" - case $O in - a) - all=yes - ;; - D) - [ "$debug_echo" = "echo" ] && set -x - debug_echo=echo - ;; - h) - help; - exit 0 - ;; - v) - echo $name $version - exit 0 - ;; - d) - index_dir=$OPTARG - ;; - # a long option! 
- -) - case "$OPTARG" in - a*|-a*) - all=yes - ;; - de*|-de*) - [ "$debug_echo" = "echo" ] && set -x - debug_echo=echo - ;; - h*|-h*) - help; - exit 0 - ;; - di*|-di*) - index_dir="`expr \"$OPTARG\" ':' '[^=]*=\(.*\)'`" - ;; - version|-version) - echo $name $version - exit 0 - ;; - *|-*) - echo "$0: invalid option -- \"$OPTARG\"" - help; - exit -1 - ;; - esac - esac -done -shift `expr $OPTIND - 1` - -# -# Input file name -# -if [ -z "$all" -a -z "$1" ]; then - help - echo "$name: No HTML documents given" - exit 2 -fi - -if [ -n "$all" -a -n "$1" ]; then - echo "$name: --all specified, ignoring DIRECTORY-DIRs" -fi - -if [ -n "$all" ]; then - document_dirs=`/bin/ls -d1 $index_dir` -else - document_dirs=$* -fi - -index_file=$index_dir/index.html -rm -f $index_file -echo -n "$name: Writing index: $index_file..." - -# head -cat >> $index_file < -Info documentation index - -

Info documentation index

-

-This is the directory file \`index.html' a.k.a. \`DIR', which contains the -topmost node of the HTML Info hierarchy. -

-
    -EOF - -#list -for i in $document_dirs; do - cat < $i ($i as one big page) -EOF -done >> $index_file - -# foot -cat >> $index_file < - - -EOF -echo diff --git a/buildscripts/lilypond-words.py b/buildscripts/lilypond-words.py deleted file mode 100644 index e9851f6231..0000000000 --- a/buildscripts/lilypond-words.py +++ /dev/null @@ -1,149 +0,0 @@ -#!@PYTHON@ - -# Created 01 September 2003 by Heikki Junes. -# Rewritten by John Mandereau - -# Generates lilypond-words.el for (X)Emacs and lilypond-words[.vim] for Vim. - -import re -import sys -import os -import getopt - -keywords = [] -reserved_words = [] -note_names = [] - -# keywords not otherwise found -keywords += ['include', 'maininput', 'version'] - -# the main keywords -s = open ('lily/lily-lexer.cc', 'r').read () -keywords += [w for w in re.findall (r"\s*{\"(.+)\",\s*.*},\s*\n", s)] - -s = open ('scm/markup.scm', 'r').read () -keywords += [w for w in re.findall (r"(?m)^\s*\(cons\s*([a-z-]+)-markup", s)] - -# identifiers and keywords -for name in ['ly/chord-modifiers-init.ly', - 'ly/dynamic-scripts-init.ly', - 'ly/engraver-init.ly', - 'ly/grace-init.ly', - 'ly/gregorian.ly', - 'ly/music-functions-init.ly', - 'ly/performer-init.ly', - 'ly/property-init.ly', - 'ly/scale-definitions-init.ly', - 'ly/script-init.ly', - 'ly/spanners-init.ly', - 'ly/declarations-init.ly', - 'ly/params-init.ly']: - s = open (name, 'r').read () - keywords += [w for w in re.findall (r"(?m)^\s*\"?([a-zA-Z]+)\"?\s*=", s)] - -# note names -for name in ['ly/catalan.ly', - 'ly/deutsch.ly', - 'ly/drumpitch-init.ly', - 'ly/english.ly', - 'ly/espanol.ly', - 'ly/italiano.ly', - 'ly/nederlands.ly', - 'ly/norsk.ly', - 'ly/portugues.ly', - 'ly/suomi.ly', - 'ly/svenska.ly', - 'ly/vlaams.ly']: - s = open (name, 'r').read () - note_names += [n for n in re.findall (r"(?m)^\s*\(([a-z]+)[^l]+ly:make-pitch", s)] - -# reserved words -for name in ['ly/engraver-init.ly', - 'ly/performer-init.ly']: - s = open (name, 'r').read () - for pattern in 
[r"(?m)^\s*.consists\s+\"([a-zA-Z_]+)\"", - r"[\\]name\s+[\"]?([a-zA-Z_]+)[\"]?", - r"\s+([a-zA-Z_]+)\s*\\(?:set|override)"]: - reserved_words += [w for w in re.findall (pattern, s)] - -keywords = list (set (keywords)) -keywords.sort (reverse=True) - -reserved_words = list (set (reserved_words)) -reserved_words.sort (reverse=True) - -note_names = list (set (note_names)) -note_names.sort (reverse=True) - - -# output -outdir = '' -out_words = False -out_el = False -out_vim = False - -options = getopt.getopt (sys.argv[1:], - '', ['words', 'el', 'vim', 'dir='])[0] - -for (o, a) in options: - if o == '--words': - out_words = True - elif o == '--el': - out_el = True - elif o == '--vim': - out_vim = True - elif o == '--dir': - outdir = a - -if out_words or out_el: - outstring = ''.join (['\\\\' + w + '\n' for w in keywords]) - outstring += ''.join ([w + '\n' for w in reserved_words]) - outstring += ''.join ([w + '\n' for w in note_names]) - -if out_words: - f = open (os.path.join (outdir, 'lilypond-words'), 'w') - f.write (outstring) - -if out_el: - f = open (os.path.join (outdir, 'lilypond-words.el'), 'w') - f.write (outstring) - - # the menu in lilypond-mode.el - # for easier typing of this list, replace '/' with '\' below - # when writing to file - elisp_menu = ['/( - _ /) -', - '/[ - _ /] -', - '< - _ > -', - '<< - _ >> -', - '///( - _ ///) -', - '///[ - _ ///] -', - '///< - _ ///! -', - '///> - _ ///! -', - '//center - / << _ >> -', - '//column - / << _ >> -', - '//context/ Staff/ = - % { _ } -', - '//context/ Voice/ = - % { _ } -', - '//markup - { _ } -', - '//notes - { _ } -', - '//relative - % { _ } -', - '//score - { //n /? //simultaneous { //n _ //n } /! //n //paper { } //n /? //midi { } //n /! 
} //n -', - '//simultaneous - { _ } -', - '//sustainDown - _ //sustainUp -', - '//times - % { _ } -', - '//transpose - % { _ } -', - ''] - f.write ('\n'.join ([line.replace ('/', '\\') for line in elisp_menu])) - -if out_vim: - f = open (os.path.join (outdir, 'lilypond-words.vim'), 'w') - f.write ('syn match lilyKeyword \"[-_^]\\?\\\\\\(') - f.write (''.join ([w + '\\|' for w in keywords])) - f.write ('n\\)\\(\\A\\|\\n\\)\"me=e-1\n') - - f.write ('syn match lilyReservedWord \"\\(\\A\\|\\n\\)\\(') - f.write (''.join ([w + '\\|' for w in reserved_words])) - f.write ('Score\\)\\(\\A\\|\\n\\)\"ms=s+1,me=e-1\n') - - f.write ('syn match lilyNote \"\\<\\(\\(\\(') - f.write (''.join ([w + '\\|' for w in note_names])) - f.write ('a\\)\\([,\']\\)\\{,4}\\([?!]\\)\\?\\)\\|s\\|r\\)\\(\\(128\\|64\\|32\\|16\\|8\\|4\\|2\\|1\\|\\\\breve\\|\\\\longa\\|\\\\maxima\\)[.]\\{,8}\\)\\?\\(\\A\\|\\n\\)\"me=e-1\n') diff --git a/buildscripts/lys-to-tely.py b/buildscripts/lys-to-tely.py deleted file mode 100644 index c9d698f92c..0000000000 --- a/buildscripts/lys-to-tely.py +++ /dev/null @@ -1,125 +0,0 @@ -#!@PYTHON@ - - -''' -TODO: - - * Add @nodes, split at sections? - -''' - - -import sys -import os -import getopt -import re - -program_name = 'lys-to-tely' - -include_snippets = '@lysnippets' -fragment_options = 'printfilename,texidoc' -help_text = r"""Usage: %(program_name)s [OPTIONS]... LY-FILE... -Construct tely doc from LY-FILEs. - -Options: - -h, --help print this help - -f, --fragment-options=OPTIONS use OPTIONS as lilypond-book fragment - options - -o, --output=NAME write tely doc to NAME - -t, --title=TITLE set tely doc title TITLE - --template=TEMPLATE use TEMPLATE as Texinfo template file, - instead of standard template; TEMPLATE should contain a command - '%(include_snippets)s' to tell where to insert LY-FILEs. When this - option is used, NAME and TITLE are ignored. 
-""" - -def help (text): - sys.stdout.write ( text) - sys.exit (0) - -(options, files) = getopt.getopt (sys.argv[1:], 'f:hn:t:', - ['fragment-options=', 'help', 'name=', 'title=', 'template=']) - -name = "ly-doc" -title = "Ly Doc" -template = '''\input texinfo -@setfilename %%(name)s.info -@settitle %%(title)s - -@documentencoding utf-8 -@iftex -@afourpaper -@end iftex - -@finalout @c we do not want black boxes. - -@c fool ls-latex -@ignore -@author Han-Wen Nienhuys and Jan Nieuwenhuizen -@title %%(title)s -@end ignore - -@node Top, , , (dir) -@top %%(title)s - -%s - -@bye -''' % include_snippets - -for opt in options: - o = opt[0] - a = opt[1] - if o == '-h' or o == '--help': - # We can't use vars () inside a function, as that only contains all - # local variables and none of the global variables! Thus we have to - # generate the help text here and pass it to the function... - help (help_text % vars ()) - elif o == '-n' or o == '--name': - name = a - elif o == '-t' or o == '--title': - title = a - elif o == '-f' or o == '--fragment-options': - fragment_options = a - elif o == '--template': - template = open (a, 'r').read () - else: - raise Exception ('unknown option: ' + o) - -texi_file_re = re.compile ('.*\.i?te(ly|xi)$') - -def name2line (n): - if texi_file_re.match (n): - # We have a texi include file, simply include it: - s = r"@include %s" % os.path.basename (n) - else: - # Assume it's a lilypond file -> create image etc. - s = r""" -@ifhtml -@html - -@end html -@end ifhtml - -@lilypondfile[%s]{%s} -""" % (os.path.basename (n), fragment_options, n) - return s - -if files: - dir = os.path.dirname (name) or "." -# don't strip .tely extension, input/lsr uses .itely - name = os.path.basename (name) - template = template % vars () - - s = "\n".join (map (name2line, files)) - s = template.replace (include_snippets, s, 1) - f = "%s/%s" % (dir, name) - sys.stderr.write ("%s: writing %s..." 
% (program_name, f)) - h = open (f, "w") - h.write (s) - h.close () - sys.stderr.write ('\n') -else: - # not Unix philosophy, but hey, at least we notice when - # we don't distribute any .ly files. - sys.stderr.write ("No files specified. Doing nothing") diff --git a/buildscripts/makelsr.py b/buildscripts/makelsr.py deleted file mode 100644 index cb0619f6de..0000000000 --- a/buildscripts/makelsr.py +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/env python - -import sys -import os -import glob -import re - -USAGE = ''' Usage: makelsr.py LSR_SNIPPETS_DIR -This script must be run from top of the source tree; -it updates snippets input/lsr with snippets in input/new or LSR_SNIPPETS_DIR. -''' - -LY_HEADER_LSR = '''%% Do not edit this file; it is auto-generated from LSR http://lsr.dsi.unimi.it -%% This file is in the public domain. -''' - -LY_HEADER_NEW = '''%% Do not edit this file; it is auto-generated from input/new -%% This file is in the public domain. -''' - -DEST = os.path.join ('input', 'lsr') -NEW_LYS = os.path.join ('input', 'new') -TEXIDOCS = os.path.join ('input', 'texidocs') - -TAGS = [] -# NR 1 -TAGS.extend (['pitches', 'rhythms', 'expressive-marks', -'repeats', 'simultaneous-notes', 'staff-notation', -'editorial-annotations', 'text']) -# NR 2 -TAGS.extend (['vocal-music', 'chords', 'keyboards', -'percussion', 'fretted-strings', 'unfretted-strings', -'ancient-notation', 'winds', 'world-music' -]) - -# other -TAGS.extend (['contexts-and-engravers', 'tweaks-and-overrides', -'paper-and-layout', 'breaks', 'spacing', 'midi', 'titles', 'template']) - -def exit_with_usage (n=0): - sys.stderr.write (USAGE) - sys.exit (n) - -try: - in_dir = sys.argv[1] -except: - exit_with_usage (2) - -if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)): - exit_with_usage (3) - -unsafe = [] -unconverted = [] -notags_files = [] - -# mark the section that will be printed verbatim by lilypond-book -end_header_re = re.compile ('(\\header {.+?doctitle = ".+?})\n', re.M | re.S) - -def 
mark_verbatim_section (ly_code): - return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1) - -# '% LSR' comments are to be stripped -lsr_comment_re = re.compile (r'\s*%+\s*LSR.*') - -begin_header_re = re.compile (r'\\header\s*{', re.M) - -# add tags to ly files from LSR -def add_tags (ly_code, tags): - return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n', ly_code, 1) - -def copy_ly (srcdir, name, tags): - global unsafe - global unconverted - dest = os.path.join (DEST, name) - tags = ', '.join (tags) - s = open (os.path.join (srcdir, name)).read () - - texidoc_translations_path = os.path.join (TEXIDOCS, - os.path.splitext (name)[0] + '.texidoc') - if os.path.exists (texidoc_translations_path): - texidoc_translations = open (texidoc_translations_path).read () - # Since we want to insert the translations verbatim using a - # regexp, \\ is understood as ONE escaped backslash. So we have - # to escape those backslashes once more... - texidoc_translations = texidoc_translations.replace ('\\', '\\\\') - s = begin_header_re.sub ('\\g<0>\n' + texidoc_translations, s, 1) - - if in_dir in srcdir: - s = LY_HEADER_LSR + add_tags (s, tags) - else: - s = LY_HEADER_NEW + s - - s = mark_verbatim_section (s) - s = lsr_comment_re.sub ('', s) - open (dest, 'w').write (s) - - e = os.system ("convert-ly -e '%s'" % dest) - if e: - unconverted.append (dest) - if os.path.exists (dest + '~'): - os.remove (dest + '~') - # -V seems to make unsafe snippets fail nicer/sooner - e = os.system ("lilypond -V -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" % dest) - if e: - unsafe.append (dest) - -def read_source_with_dirs (src): - s = {} - l = {} - for tag in TAGS: - srcdir = os.path.join (src, tag) - l[tag] = set (map (os.path.basename, glob.glob (os.path.join (srcdir, '*.ly')))) - for f in l[tag]: - if f in s: - s[f][1].append (tag) - else: - s[f] = (srcdir, [tag]) - return s, l - - -tags_re = re.compile ('lsrtags\\s*=\\s*"(.+?)"') - -def read_source (src): - s = {} - l = 
dict ([(tag, set()) for tag in TAGS]) - for f in glob.glob (os.path.join (src, '*.ly')): - basename = os.path.basename (f) - m = tags_re.search (open (f, 'r').read ()) - if m: - file_tags = [tag.strip() for tag in m.group (1). split(',')] - s[basename] = (src, file_tags) - [l[tag].add (basename) for tag in file_tags if tag in TAGS] - else: - notags_files.append (f) - return s, l - - -def dump_file_list (file, list): - f = open (file, 'w') - f.write ('\n'.join (list) + '\n') - -## clean out existing lys and generated files -map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) + - glob.glob (os.path.join (DEST, '*.snippet-list'))) - -# read LSR source where tags are defined by subdirs -snippets, tag_lists = read_source_with_dirs (in_dir) -# read input/new where tags are directly -s, l = read_source (NEW_LYS) -snippets.update (s) -for t in TAGS: - tag_lists[t].update (l[t]) - -for (name, (srcdir, tags)) in snippets.items (): - copy_ly (srcdir, name, tags) - -for (tag, file_set) in tag_lists.items (): - dump_file_list (os.path.join (DEST, tag + '.snippet-list'), sorted(file_set)) - -if unconverted: - sys.stderr.write ('These files could not be converted successfully by convert-ly:\n') - sys.stderr.write ('\n'.join (unconverted) + '\n\n') - -if notags_files: - sys.stderr.write ('No tags could be found in these files:\n') - sys.stderr.write ('\n'.join (notags_files) + '\n\n') - -dump_file_list ('lsr-unsafe.txt', unsafe) -sys.stderr.write (''' - -Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY! 
- git add input/lsr/*.ly - xargs git-diff HEAD < lsr-unsafe.txt - -''') - diff --git a/buildscripts/manuals_definitions.py b/buildscripts/manuals_definitions.py deleted file mode 100644 index e8e6d50cd9..0000000000 --- a/buildscripts/manuals_definitions.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/python - -# This module is imported by check_texi_refs.py - -references_dict = { - 'lilypond': 'ruser', - 'lilypond-learning': 'rlearning', - 'lilypond-program': 'rprogram', - 'lilypond-snippets': 'rlsr', - 'music-glossary': 'rglos', - 'lilypond-internals': 'rinternals' } diff --git a/buildscripts/mass-link.py b/buildscripts/mass-link.py deleted file mode 100644 index 17412e5559..0000000000 --- a/buildscripts/mass-link.py +++ /dev/null @@ -1,67 +0,0 @@ -#!@PYTHON@ -# mass-link.py - -# USAGE: mass-link.py [--prepend-suffix SUFFIX] symbolic | hard SOURCEDIR DESTDIR FILES -# -# create hard or symbolic links to SOURCEDIR/FILES in DESTDIR -# -# If --prepend-suffix is specified, link to foo.bar will be called fooSUFFIX.bar. -# Shell wildcards expansion is performed on FILES. 
- -import sys -import os -import glob -import getopt - -print "mass-link.py" - -optlist, args = getopt.getopt (sys.argv[1:], '', ['prepend-suffix=']) -link_type, source_dir, dest_dir = args[0:3] -files = args[3:] - -source_dir = os.path.normpath (source_dir) -dest_dir = os.path.normpath (dest_dir) - -prepended_suffix = '' -for x in optlist: - if x[0] == '--prepend-suffix': - prepended_suffix = x[1] - -if prepended_suffix: - def insert_suffix (p): - l = p.split ('.') - if len (l) >= 2: - l[-2] += prepended_suffix - return '.'.join (l) - return p + prepended_suffix -else: - insert_suffix = lambda p: p - -if link_type == 'symbolic': - link = os.symlink -elif link_type == 'hard': - link = os.link -else: - sys.stderr.write(sys.argv[0] + ': ' + link_type + ": wrong argument, expected 'symbolic' or 'hard'\n") - sys.exit (1) - -sourcefiles = [] -for pattern in files: - sourcefiles += (glob.glob (os.path.join (source_dir, pattern))) - -def relative_path (f): - if source_dir == '.': - return f - return f[len (source_dir) + 1:] - -destfiles = [os.path.join (dest_dir, insert_suffix (relative_path (f))) for f in sourcefiles] - -destdirs = set ([os.path.dirname (dest) for dest in destfiles]) -[os.makedirs (d) for d in destdirs if not os.path.exists (d)] - -def force_link (src,dest): - if os.path.exists (dest): - os.system ('rm -f ' + dest) - link (src, dest) - -map (force_link, sourcefiles, destfiles) diff --git a/buildscripts/mf-to-table.py b/buildscripts/mf-to-table.py deleted file mode 100644 index 2c1df75f7f..0000000000 --- a/buildscripts/mf-to-table.py +++ /dev/null @@ -1,244 +0,0 @@ -#!@PYTHON@ - -# mf-to-table.py -- convert spacing info in MF logs . 
-# -# source file of the GNU LilyPond music typesetter -# -# (c) 1997--2008 Han-Wen Nienhuys - -import os -import sys -import getopt -import re -import time - -def read_log_file (fn): - str = open (fn).read () - str = re.sub ('\n', '', str) - str = re.sub ('[\t ]+', ' ', str) - - deps = [] - autolines = [] - def include_func (match, d = deps): - d.append (match.group (1)) - return '' - - def auto_func (match, a = autolines): - a.append (match.group (1)) - return '' - - str = re.sub ('\\(([/a-z.A-Z_0-9-]+\\.mf)', include_func, str) - str = re.sub ('@{(.*?)@}', auto_func, str) - - return (autolines, deps) - - -class Char_metric: - def __init__ (self): - pass - -font_family = 'feta' - -def parse_logfile (fn): - autolines, deps = read_log_file (fn) - charmetrics = [] - - global_info = { - 'filename' : os.path.splitext (os.path.basename (fn))[0] - } - group = '' - - for l in autolines: - tags = l.split ('@:') - if tags[0] == 'group': - group = tags[1] - elif tags[0] == 'puorg': - group = '' - elif tags[0] == 'char': - name = tags[9] - - if group: - name = group + '.' 
+ name - m = { - 'description': tags[1], - 'name': name, - 'code': int (tags[2]), - 'breapth': float (tags[3]), - 'width': float (tags[4]), - 'depth': float (tags[5]), - 'height': float (tags[6]), - 'wx': float (tags[7]), - 'wy': float (tags[8]), - } - charmetrics.append (m) - elif tags[0] == 'font': - global font_family - font_family = (tags[3]) - # To omit 'GNU' (foundry) from font name proper: - # name = tags[2:] - #urg - if 0: # testing - tags.append ('Regular') - - encoding = re.sub (' ','-', tags[5]) - tags = tags[:-1] - name = tags[1:] - global_info['design_size'] = float (tags[4]) - global_info['FontName'] = '-'.join (name) - global_info['FullName'] = ' '.join (name) - global_info['FamilyName'] = '-'.join (name[1:-1]) - if 1: - global_info['Weight'] = tags[4] - else: # testing - global_info['Weight'] = tags[-1] - - global_info['FontBBox'] = '0 0 1000 1000' - global_info['Ascender'] = '0' - global_info['Descender'] = '0' - global_info['EncodingScheme'] = encoding - - elif tags[0] == 'parameter': - global_info[tags[1]] = tags[2]; - - return (global_info, charmetrics, deps) - - - -def character_lisp_table (global_info, charmetrics): - - def conv_char_metric (charmetric): - f = 1.0 - s = """(%s . -((bbox . (%f %f %f %f)) -(subfont . "%s") -(subfont-index . %d) -(attachment . (%f . %f)))) -""" %(charmetric['name'], - -charmetric['breapth'] * f, - -charmetric['depth'] * f, - charmetric['width'] * f, - charmetric['height'] * f, - global_info['filename'], - charmetric['code'], - charmetric['wx'], - charmetric['wy']) - - return s - - s = '' - for c in charmetrics: - s += conv_char_metric (c) - - return s - - -def global_lisp_table (global_info): - str = '' - - keys = ['staffsize', 'stafflinethickness', 'staff_space', - 'linethickness', 'black_notehead_width', 'ledgerlinethickness', - 'design_size', - 'blot_diameter' - ] - for k in keys: - if global_info.has_key (k): - str = str + "(%s . 
%s)\n" % (k,global_info[k]) - - return str - - -def ps_encoding (name, global_info, charmetrics): - encs = ['.notdef'] * 256 - for m in charmetrics: - encs[m['code']] = m['name'] - - - s = ('/%s [\n' % name) - for m in range (0, 256): - s += (' /%s %% %d\n' % (encs[m], m)) - s += ('] def\n') - return s - -def get_deps (deps, targets): - s = '' - for t in targets: - t = re.sub ( '^\\./', '', t) - s += ('%s '% t) - s += (": ") - for d in deps: - s += ('%s ' % d) - s += ('\n') - return s - -def help (): - sys.stdout.write(r"""Usage: mf-to-table [OPTIONS] LOGFILEs - -Generate feta metrics table from preparated feta log. - -Options: - -d, --dep=FILE print dependency info to FILE - -h, --help print this help - -l, --ly=FILE name output table - -o, --outdir=DIR prefix for dependency info - -p, --package=DIR specify package - - """) - sys.exit (0) - - -(options, files) = \ - getopt.getopt (sys.argv[1:], - 'a:d:ho:p:t:', - ['enc=', 'outdir=', 'dep=', 'lisp=', - 'global-lisp=', - 'debug', 'help', 'package=']) - -global_lisp_nm = '' -char_lisp_nm = '' -enc_nm = '' -depfile_nm = '' -lyfile_nm = '' -outdir_prefix = '.' 
- -for opt in options: - o = opt[0] - a = opt[1] - if o == '--dep' or o == '-d': - depfile_nm = a - elif o == '--outdir' or o == '-o': - outdir_prefix = a - elif o == '--lisp': - char_lisp_nm = a - elif o == '--global-lisp': - global_lisp_nm = a - elif o == '--enc': - enc_nm = a - elif o== '--help' or o == '-h': - help() - elif o == '--debug': - debug_b = 1 - else: - print o - raise getopt.error - -base = os.path.splitext (lyfile_nm)[0] - -for filenm in files: - (g, m, deps) = parse_logfile (filenm) - - enc_name = 'FetaEncoding' - if re.search ('parmesan', filenm): - enc_name = 'ParmesanEncoding' - elif re.search ('feta-brace', filenm): - enc_name = 'FetaBraceEncoding' - elif re.search ('feta-alphabet', filenm): - enc_name = 'FetaAlphabetEncoding'; - - open (enc_nm, 'w').write (ps_encoding (enc_name, g, m)) - open (char_lisp_nm, 'w').write (character_lisp_table (g, m)) - open (global_lisp_nm, 'w').write (global_lisp_table (g)) - if depfile_nm: - open (depfile_nm, 'wb').write (get_deps (deps, - [base + '.log', base + '.dvi', base + '.pfa', - depfile_nm, - base + '.pfb'])) diff --git a/buildscripts/mf2pt1.pl b/buildscripts/mf2pt1.pl deleted file mode 100644 index 804fc1f999..0000000000 --- a/buildscripts/mf2pt1.pl +++ /dev/null @@ -1,1090 +0,0 @@ -#! /usr/bin/perl - -################################################## -# Convert stylized Metafont to PostScript Type 1 # -# By Scott Pakin # -################################################## - -######################################################################## -# mf2pt1 # -# Copyright (C) 2008 Scott Pakin # -# # -# This program may be distributed and/or modified under the conditions # -# of the LaTeX Project Public License, either version 1.3c of this # -# license or (at your option) any later version. # -# # -# The latest version of this license is in: # -# # -# http://www.latex-project.org/lppl.txt # -# # -# and version 1.3c or later is part of all distributions of LaTeX # -# version 2006/05/20 or later. 
# -######################################################################## - -our $VERSION = "2.4.4"; # mf2pt1 version number -require 5.6.1; # I haven't tested mf2pt1 with older Perl versions - -use File::Basename; -use File::Spec; -use Getopt::Long; -use Pod::Usage; -use Math::Trig; -use warnings; -use strict; - -# Define some common encoding vectors. -my @standardencoding = - ((map {"_a$_"} (0..31)), - qw (space exclam quotedbl numbersign dollar percent ampersand - quoteright parenleft parenright asterisk plus comma hyphen - period slash zero one two three four five six seven eight - nine colon semicolon less equal greater question at A B C D E - F G H I J K L M N O P Q R S T U V W X Y Z bracketleft - backslash bracketright asciicircum underscore quoteleft a b c - d e f g h i j k l m n o p q r s t u v w x y z braceleft bar - braceright asciitilde), - (map {"_a$_"} (127..160)), - qw (exclamdown cent sterling fraction yen florin section currency - quotesingle quotedblleft guillemotleft guilsinglleft - guilsinglright fi fl _a176 endash dagger daggerdbl - periodcentered _a181 paragraph bullet quotesinglbase - quotedblbase quotedblright guillemotright ellipsis - perthousand _a190 questiondown _a192 grave acute circumflex - tilde macron breve dotaccent dieresis _a201 ring cedilla - _a204 hungarumlaut ogonek caron emdash), - (map {"_a$_"} (209..224)), - qw (AE _a226 ordfeminine _a228 _a229 _a230 _a231 Lslash Oslash OE - ordmasculine _a236 _a237 _a238 _a239 _a240 ae _a242 _a243 - _a244 dotlessi _a246 _a247 lslash oslash oe germandbls _a252 - _a253 _a254 _a255)); -my @isolatin1encoding = - ((map {"_a$_"} (0..31)), - qw (space exclam quotedbl numbersign dollar percent ampersand - quoteright parenleft parenright asterisk plus comma minus - period slash zero one two three four five six seven eight - nine colon semicolon less equal greater question at A B C D E - F G H I J K L M N O P Q R S T U V W X Y Z bracketleft - backslash bracketright asciicircum underscore quoteleft 
a b c - d e f g h i j k l m n o p q r s t u v w x y z braceleft bar - braceright asciitilde), - (map {"_a$_"} (128..143)), - qw (dotlessi grave acute circumflex tilde macron breve dotaccent - dieresis _a153 ring cedilla _a156 hungarumlaut ogonek - caron space exclamdown cent sterling currency yen brokenbar - section dieresis copyright ordfeminine guillemotleft - logicalnot hyphen registered macron degree plusminus - twosuperior threesuperior acute mu paragraph periodcentered - cedilla onesuperior ordmasculine guillemotright onequarter - onehalf threequarters questiondown Agrave Aacute Acircumflex - Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex - Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde - Ograve Oacute Ocircumflex Otilde Odieresis multiply Oslash - Ugrave Uacute Ucircumflex Udieresis Yacute Thorn germandbls - agrave aacute acircumflex atilde adieresis aring ae ccedilla - egrave eacute ecircumflex edieresis igrave iacute icircumflex - idieresis eth ntilde ograve oacute ocircumflex otilde - odieresis divide oslash ugrave uacute ucircumflex udieresis - yacute thorn ydieresis)); -my @ot1encoding = - qw (Gamma Delta Theta Lambda Xi Pi Sigma Upsilon Phi - Psi Omega ff fi fl ffi ffl dotlessi dotlessj grave acute caron - breve macron ring cedilla germandbls ae oe oslash AE OE Oslash - suppress exclam quotedblright numbersign dollar percent - ampersand quoteright parenleft parenright asterisk plus comma - hyphen period slash zero one two three four five six seven - eight nine colon semicolon exclamdown equal questiondown - question at A B C D E F G H I J K L M N O P Q R S T U V W X Y - Z bracketleft quotedblleft bracketright circumflex dotaccent - quoteleft a b c d e f g h i j k l m n o p q r s t u v w x y z - endash emdash hungarumlaut tilde dieresis); -my @t1encoding = - qw (grave acute circumflex tilde dieresis hungarumlaut ring caron - breve macron dotaccent cedilla ogonek quotesinglbase - guilsinglleft guilsinglright quotedblleft 
quotedblright - quotedblbase guillemotleft guillemotright endash emdash cwm - perthousand dotlessi dotlessj ff fi fl ffi ffl space exclam - quotedbl numbersign dollar percent ampersand quoteright - parenleft parenright asterisk plus comma hyphen period slash - zero one two three four five six seven eight nine colon - semicolon less equal greater question at A B C D E F G H I J K L - M N O P Q R S T U V W X Y Z bracketleft backslash bracketright - asciicircum underscore quoteleft a b c d e f g h i j k l m n o p - q r s t u v w x y z braceleft bar braceright asciitilde - sfthyphen Abreve Aogonek Cacute Ccaron Dcaron Ecaron Eogonek - Gbreve Lacute Lcaron Lslash Nacute Ncaron Eng Ohungarumlaut - Racute Rcaron Sacute Scaron Scedilla Tcaron Tcedilla - Uhungarumlaut Uring Ydieresis Zacute Zcaron Zdotaccent IJ - Idotaccent dcroat section abreve aogonek cacute ccaron dcaron - ecaron eogonek gbreve lacute lcaron lslash nacute ncaron eng - ohungarumlaut racute rcaron sacute scaron scedilla tcaron - tcedilla uhungarumlaut uring ydieresis zacute zcaron zdotaccent - ij exclamdown questiondown sterling Agrave Aacute Acircumflex - Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex - Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde Ograve - Oacute Ocircumflex Otilde Odieresis OE Oslash Ugrave Uacute - Ucircumflex Udieresis Yacute Thorn SS agrave aacute acircumflex - atilde adieresis aring ae ccedilla egrave eacute ecircumflex - edieresis igrave iacute icircumflex idieresis eth ntilde ograve - oacute ocircumflex otilde odieresis oe oslash ugrave uacute - ucircumflex udieresis yacute thorn germandbls); - -# Define font parameters that the user can override. 
-my $fontversion; -my $creationdate; -my $comment; -my $familyname; -my $weight; -my $fullname; -my $fixedpitch; -my $italicangle; -my $underlinepos; -my $underlinethick; -my $fontname; -my $uniqueID; -my $designsize; -my ($mffile, $pt1file, $pfbfile, $ffscript); -my $encoding; -my $rounding; -my $bpppix; - -# Define all of our other global variables. -my $progname = basename $0, ".pl"; -my $mag; -my @fontbbox; -my @charbbox; -my @charwd; -my @glyphname; -my @charfiles; -my $filebase; -my $filedir; -my $filenoext; -my $versionmsg = "mf2pt1 version $VERSION - -Copyright (C) 2008 Scott Pakin - -This program may be distributed and/or modified under the conditions -of the LaTeX Project Public License, either version 1.3c of this -license or (at your option) any later version. - -The latest version of this license is in: - - http://www.latex-project.org/lppl.txt - -and version 1.3c or later is part of all distributions of LaTeX -version 2006/05/20 or later. -"; - - -###################################################################### - -# The routines to compute the fractional approximation of a real number -# are heavily based on code posted by Ben Tilly -# on Nov 16th, 2000, to the -# PerlMonks list. See . - - -# Takes numerator/denominator pairs. -# Returns a PS fraction string representation (with a trailing space). -sub frac_string (@) -{ - my $res = ""; - - while (@_) { - my $n = shift; - my $d = shift; - $res .= $n . " "; - $res .= $d . " div " if $d > 1; - } - - return $res; -} - - -# Takes a number. -# Returns a numerator and denominator with the smallest denominator -# so that the difference of the resulting fraction to the number is -# smaller or equal to $rounding. 
-sub frac_approx ($) -{ - my $num = shift; - my $f = ret_frac_iter ($num); - - while (1) { - my ($n, $m) = $f->(); - my $approx = $n / $m; - my $delta = abs ($num - $approx); - return ($n, $m) if ($delta <= $rounding); - } -} - - -# Takes a number, returns the best integer approximation and (in list -# context) the error. -sub best_int ($) -{ - my $x = shift; - my $approx = sprintf '%.0f', $x; - if (wantarray) { - return ($approx, $x - $approx); - } - else { - return $approx; - } -} - - -# Takes a numerator and denominator, in scalar context returns -# the best fraction describing them, in list the numerator and -# denominator. -sub frac_standard ($$) -{ - my $n = best_int(shift); - my $m = best_int(shift); - my $k = gcd($n, $m); - $n /= $k; - $m /= $k; - if ($m < 0) { - $n *= -1; - $m *= -1; - } - if (wantarray) { - return ($n, $m); - } - else { - return "$n/$m"; - } -} - - -# Euclidean algorithm for calculating a GCD. -# Takes two integers, returns the greatest common divisor. -sub gcd ($$) -{ - my ($n, $m) = @_; - while ($m) { - my $k = $n % $m; - ($n, $m) = ($m, $k); - } - return $n; -} - - -# Takes a list of terms in a continued fraction, and converts it -# into a fraction. -sub ints_to_frac (@) -{ - my ($n, $m) = (0, 1); # Start with 0 - while (@_) { - my $k = pop; - if ($n) { - # Want frac for $k + 1/($n/$m) - ($n, $m) = frac_standard($k*$n + $m, $n); - } - else { - # Want $k - ($n, $m) = frac_standard($k, 1); - } - } - return frac_standard($n, $m); -} - - -# Takes a number, returns an anon sub which iterates through a set of -# fractional approximations that converges very quickly to the number. -sub ret_frac_iter ($) -{ - my $x = shift; - my $term_iter = ret_next_term_iter($x); - my @ints; - return sub { - push @ints, $term_iter->(); - return ints_to_frac(@ints); - } -} - - -# Terms of a continued fraction converging on that number. 
-sub ret_next_term_iter ($) -{ - my $x = shift; - return sub { - (my $n, $x) = best_int($x); - if (0 != $x) { - $x = 1/$x; - } - return $n; - } -} - -###################################################################### - -# Round a number to the nearest integer. -sub round ($) -{ - return int($_[0] + 0.5*($_[0] <=> 0)); -} - - -# Round a number to a given precision. -sub prec ($) -{ - return round ($_[0] / $rounding) * $rounding; -} - - -# Set a variable's value to the first defined value in the given list. -# If the variable was not previously defined and no value in the list -# is defined, do nothing. -sub assign_default (\$@) -{ - my $varptr = shift; # Pointer to variable to define - return if defined $$varptr && $$varptr ne "UNSPECIFIED"; - foreach my $val (@_) { - next if !defined $val; - $$varptr = $val; - return; - } -} - - -# Print and execute a shell command. An environment variable with the -# same name as the command overrides the command name. Return 1 on -# success, 0 on failure. Optionally abort if the command fails, based -# on the first argument to execute_command. -sub execute_command ($@) -{ - my $abort_on_failure = shift; - my @command = @_; - $command[0] = $ENV{uc $command[0]} || $command[0]; - my $prettyargs = join (" ", map {/[\\ ]/ ? "'$_'" : $_} @command); - print "Invoking \"$prettyargs\"...\n"; - my $result = system @command; - die "${progname}: \"$prettyargs\" failed ($!)\n" if $result && $abort_on_failure; - return !$result; -} - - -# Output the font header. -sub output_header () -{ - # Show the initial boilerplate. - print OUTFILE <<"ENDHEADER"; -%!FontType1-1.0: $fontname $fontversion -%%CreationDate: $creationdate -% Font converted to Type 1 by mf2pt1, written by Scott Pakin. 
-11 dict begin -/FontInfo 11 dict dup begin -/version ($fontversion) readonly def -/Notice ($comment) readonly def -/FullName ($fullname) readonly def -/FamilyName ($familyname) readonly def -/Weight ($weight) readonly def -/ItalicAngle $italicangle def -/isFixedPitch $fixedpitch def -/UnderlinePosition $underlinepos def -/UnderlineThickness $underlinethick def -end readonly def -/FontName /$fontname def -ENDHEADER - - # If we're not using an encoding that PostScript knows about, then - # create an encoding vector. - if ($encoding==\@standardencoding) { - print OUTFILE "/Encoding StandardEncoding def\n"; - } - else { - print OUTFILE "/Encoding 256 array\n"; - print OUTFILE "0 1 255 {1 index exch /.notdef put} for\n"; - foreach my $charnum (0 .. $#{$encoding}) { - if ($encoding->[$charnum] && $encoding->[$charnum]!~/^_a\d+$/) { - print OUTFILE "dup $charnum /$encoding->[$charnum] put\n"; - } - } - print OUTFILE "readonly def\n"; - } - - # Show the final boilerplate. - print OUTFILE <<"ENDHEADER"; -/PaintType 0 def -/FontType 1 def -/FontMatrix [0.001 0 0 0.001 0 0] readonly def -/UniqueID $uniqueID def -/FontBBox{@fontbbox}readonly def -currentdict end -currentfile eexec -dup /Private 5 dict dup begin -/RD{string currentfile exch readstring pop}executeonly def -/ND{noaccess def}executeonly def -/NP{noaccess put}executeonly def -ENDHEADER -} - - -# Use MetaPost to generate one PostScript file per character. We -# calculate the font bounding box from these characters and store them -# in @fontbbox. If the input parameter is 1, set other font -# parameters, too. -sub get_bboxes ($) -{ - execute_command 1, ("mpost", "-mem=mf2pt1", "-progname=mpost", - "\\mode:=localfont; mag:=$mag; bpppix $bpppix; input $mffile"); - opendir (CURDIR, ".") || die "${progname}: $! 
($filedir)\n"; - @charfiles = sort - { ($a=~ /\.(\d+)$/)[0] <=> ($b=~ /\.(\d+)$/)[0] } - grep /^$filebase.*\.\d+$/, readdir(CURDIR); - close CURDIR; - @fontbbox = (1000000, 1000000, -1000000, -1000000); - foreach my $psfile (@charfiles) { - # Read the character number from the output file's extension. - $psfile =~ /\.(\d+)$/; - my $charnum = $1; - - # Process in turn each line of the current PostScript file. - my $havebbox = 0; - open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n"; - while () { - my @tokens = split " "; - if ($tokens[0] eq "%%BoundingBox:") { - # Store the MetaPost-produced bounding box, just in case - # the given font doesn't use beginchar. - @tokens = ("%", "MF2PT1:", "glyph_dimensions", @tokens[1..4]); - $havebbox--; - } - next if $#tokens<1 || $tokens[1] ne "MF2PT1:"; - - # Process a "special" inserted into the generated PostScript. - MF2PT1_CMD: - { - # glyph_dimensions llx lly urx ury -- specified glyph dimensions - $tokens[2] eq "glyph_dimensions" && do { - my @bbox = @tokens[3..6]; - $fontbbox[0]=$bbox[0] if $bbox[0]<$fontbbox[0]; - $fontbbox[1]=$bbox[1] if $bbox[1]<$fontbbox[1]; - $fontbbox[2]=$bbox[2] if $bbox[2]>$fontbbox[2]; - $fontbbox[3]=$bbox[3] if $bbox[3]>$fontbbox[3]; - $charbbox[$charnum] = \@bbox; - $havebbox++; - last MF2PT1_CMD; - }; - - # If all we want is the bounding box, exit the loop now. 
- last MF2PT1_CMD if !$_[0]; - - # glyph_name name -- glyph name - $tokens[2] eq "glyph_name" && do { - $glyphname[$charnum] = $tokens[3]; - last MF2PT1_CMD; - }; - - # charwd wd -- character width as in TFM - $tokens[2] eq "charwd" && do { - $charwd[$charnum] = $tokens[3]; - last MF2PT1_CMD; - }; - - # font_identifier name -- full font name - $tokens[2] eq "font_identifier" && do { - $fullname = $tokens[3]; - last MF2PT1_CMD; - }; - - # font_size number -- font design size (pt, not bp) - $tokens[2] eq "font_size" && $tokens[3] && do { - $designsize = $tokens[3] * 72 / 72.27; - last MF2PT1_CMD; - }; - - # font_slant number -- italic amount - $tokens[2] eq "font_slant" && do { - $italicangle = 0 + rad2deg (atan(-$tokens[3])); - last MF2PT1_CMD; - }; - - # font_coding_scheme string -- font encoding - $tokens[2] eq "font_coding_scheme" && do { - $encoding = $tokens[3]; - last MF2PT1_CMD; - }; - - # font_version string -- font version number (xxx.yyy) - $tokens[2] eq "font_version" && do { - $fontversion = $tokens[3]; - last MF2PT1_CMD; - }; - - # font_comment string -- font comment notice - $tokens[2] eq "font_comment" && do { - $comment = join (" ", @tokens[3..$#tokens]); - last MF2PT1_CMD; - }; - - # font_family string -- font family name - $tokens[2] eq "font_family" && do { - $familyname = $tokens[3]; - last MF2PT1_CMD; - }; - - # font_weight string -- font weight (e.g., "Book" or "Heavy") - $tokens[2] eq "font_weight" && do { - $weight = $tokens[3]; - last MF2PT1_CMD; - }; - - # font_fixed_pitch number -- fixed width font (0=false, 1=true) - $tokens[2] eq "font_fixed_pitch" && do { - $fixedpitch = $tokens[3]; - last MF2PT1_CMD; - }; - - # font_underline_position number -- vertical underline position - $tokens[2] eq "font_underline_position" && do { - # We store $underlinepos in points and later - # scale it by 1000/$designsize. 
- $underlinepos = $tokens[3]; - last MF2PT1_CMD; - }; - - # font_underline_thickness number -- thickness of underline - $tokens[2] eq "font_underline_thickness" && do { - # We store $underlinethick in points and later - # scale it by 1000/$designsize. - $underlinethick = $tokens[3]; - last MF2PT1_CMD; - }; - - # font_name string -- font name - $tokens[2] eq "font_name" && do { - $fontname = $tokens[3]; - last MF2PT1_CMD; - }; - - # font_unique_id number (as string) -- globally unique font ID - $tokens[2] eq "font_unique_id" && do { - $uniqueID = 0+$tokens[3]; - last MF2PT1_CMD; - }; - } - } - close PSFILE; - if (!$havebbox) { - warn "${progname}: No beginchar in character $charnum; glyph dimensions are probably incorrect\n"; - } - } -} - - -# Convert ordinary, MetaPost-produced PostScript files into Type 1 -# font programs. -sub output_font_programs () -{ - # Iterate over all the characters. We convert each one, line by - # line and token by token. - print "Converting PostScript graphics to Type 1 font programs...\n"; - foreach my $psfile (@charfiles) { - # Initialize the font program. - $psfile =~ /\.(\d+)$/; - my $charnum = $1; - my $gname = $glyphname[$charnum] || $encoding->[$charnum]; - my @fontprog; - push @fontprog, ("/$gname {", - frac_string (frac_approx ($charbbox[$charnum]->[0]), - frac_approx ($charwd[$charnum] * $mag)) - . "hsbw"); - my ($cpx, $cpy) = - ($charbbox[$charnum]->[0], 0); # Current point (PostScript) - - # Iterate over every line in the current file. - open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n"; - while (my $oneline=) { - next if $oneline=~/^\%/; - next if $oneline=~/set/; # Fortunately, "set" never occurs on "good" lines. - my @arglist; # Arguments to current PostScript function - - # Iterate over every token in the current line. - TOKENLOOP: - foreach my $token (split " ", $oneline) { - # Number: Round and push on the argument list. 
- $token =~ /^[-.\d]+$/ && do { - push @arglist, prec ($&); - next TOKENLOOP; - }; - - # curveto: Convert to vhcurveto, hvcurveto, or rrcurveto. - $token eq "curveto" && do { - my ($dx1, $dy1) = ($arglist[0] - $cpx, - $arglist[1] - $cpy); - my ($dx1n, $dx1d) = frac_approx ($dx1); - my ($dy1n, $dy1d) = frac_approx ($dy1); - $cpx += $dx1n / $dx1d; - $cpy += $dy1n / $dy1d; - - my ($dx2, $dy2) = ($arglist[2] - $cpx, - $arglist[3] - $cpy); - my ($dx2n, $dx2d) = frac_approx ($dx2); - my ($dy2n, $dy2d) = frac_approx ($dy2); - $cpx += $dx2n / $dx2d; - $cpy += $dy2n / $dy2d; - - my ($dx3, $dy3) = ($arglist[4] - $cpx, - $arglist[5] - $cpy); - my ($dx3n, $dx3d) = frac_approx ($dx3); - my ($dy3n, $dy3d) = frac_approx ($dy3); - $cpx += $dx3n / $dx3d; - $cpy += $dy3n / $dy3d; - - if (!$dx1n && !$dy3n) { - push @fontprog, frac_string ($dy1n, $dy1d, - $dx2n, $dx2d, - $dy2n, $dy2d, - $dx3n, $dx3d) - . "vhcurveto"; - } - elsif (!$dy1n && !$dx3n) { - push @fontprog, frac_string ($dx1n, $dx1d, - $dx2n, $dx2d, - $dy2n, $dy2d, - $dy3n, $dy3d) - . "hvcurveto"; - } - else { - push @fontprog, frac_string ($dx1n, $dx1d, - $dy1n, $dy1d, - $dx2n, $dx2d, - $dy2n, $dy2d, - $dx3n, $dx3d, - $dy3n, $dy3d) - . "rrcurveto"; - } - next TOKENLOOP; - }; - - # lineto: Convert to vlineto, hlineto, or rlineto. - $token eq "lineto" && do { - my ($dx, $dy) = ($arglist[0] - $cpx, - $arglist[1] - $cpy); - my ($dxn, $dxd) = frac_approx ($dx); - my ($dyn, $dyd) = frac_approx ($dy); - $cpx += $dxn / $dxd; - $cpy += $dyn / $dyd; - - if (!$dxn) { - push @fontprog, frac_string ($dyn, $dyd) - . "vlineto" if $dyn; - } - elsif (!$dyn) { - push @fontprog, frac_string ($dxn, $dxd) - . "hlineto"; - } - else { - push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd) - . "rlineto"; - } - next TOKENLOOP; - }; - - # moveto: Convert to vmoveto, hmoveto, or rmoveto. 
- $token eq "moveto" && do { - my ($dx, $dy) = ($arglist[0] - $cpx, - $arglist[1] - $cpy); - my ($dxn, $dxd) = frac_approx ($dx); - my ($dyn, $dyd) = frac_approx ($dy); - $cpx += $dxn / $dxd; - $cpy += $dyn / $dyd; - - if (!$dxn) { - push @fontprog, frac_string ($dyn, $dyd) - . "vmoveto"; - } - elsif (!$dyn) { - push @fontprog, frac_string ($dxn, $dxd) - . "hmoveto"; - } - else { - push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd) - . "rmoveto"; - } - next TOKENLOOP; - }; - - # closepath: Output as is. - $token eq "closepath" && do { - push @fontprog, $token; - next TOKENLOOP; - }; - } - } - close PSFILE; - push @fontprog, ("endchar", - "} ND"); - print OUTFILE join ("\n\t", @fontprog), "\n"; - } -} - - -# Output the final set of code for the Type 1 font. -sub output_trailer () -{ - print OUTFILE <<"ENDTRAILER"; -/.notdef { - 0 @{[$fontbbox[2]-$fontbbox[0]]} hsbw - endchar - } ND -end -end -readonly put -noaccess put -dup/FontName get exch definefont pop -mark currentfile closefile -cleartomark -ENDTRAILER -} - -###################################################################### - -# Parse the command line. Asterisks in the following represents -# commands also defined by Plain Metafont. 
-my %opthash = (); -GetOptions (\%opthash, - "fontversion=s", # font_version - "comment=s", # font_comment - "family=s", # font_family - "weight=s", # font_weight - "fullname=s", # font_identifier (*) - "fixedpitch!", # font_fixed_pitch - "italicangle=f", # font_slant (*) - "underpos=f", # font_underline_position - "underthick=f", # font_underline_thickness - "name=s", # font_name - "uniqueid=i", # font_unique_id - "designsize=f", # font_size (*) - "encoding=s", # font_coding_scheme (*) - "rounding=f", - "bpppix=f", - "ffscript=s", - "h|help", - "V|version") || pod2usage(2); -if (defined $opthash{"h"}) { - pod2usage(-verbose => 1, - -output => \*STDOUT, # Bug workaround for Pod::Usage - -exitval => "NOEXIT"); - print "Please e-mail bug reports to scott+mf\@pakin.org.\n"; - exit 1; -} -do {print $versionmsg; exit 1} if defined $opthash{"V"}; -pod2usage(2) if $#ARGV != 0; - -# Extract the filename from the command line. -$mffile = $ARGV[0]; -my @fileparts = fileparse $mffile, ".mf"; -$filebase = $fileparts[0]; -$filedir = $fileparts[1]; -$filenoext = File::Spec->catfile ($filedir, $filebase); -$pt1file = $filebase . ".pt1"; -$pfbfile = $filebase . ".pfb"; - -assign_default $bpppix, $opthash{bpppix}, 0.02; - -# Make our first pass through the input, to set values for various options. -$mag = 100; # Get a more precise bounding box. -get_bboxes(1); # This might set $designsize. - -# Sanity-check the specified precision. -assign_default $rounding, $opthash{rounding}, 1; -if ($rounding<=0.0 || $rounding>1.0) { - die sprintf "%s: Invalid rounding amount \"%g\"; value must be a positive number no greater than 1.0\n", $progname, $rounding; -} - -# Ensure that every user-definable parameter is assigned a value. 
-assign_default $fontversion, $opthash{fontversion}, "001.000"; -assign_default $creationdate, scalar localtime; -assign_default $comment, $opthash{comment}, "Font converted to Type 1 by mf2pt1, written by Scott Pakin."; -assign_default $weight, $opthash{weight}, "Medium"; -assign_default $fixedpitch, $opthash{fixedpitch}, 0; -assign_default $uniqueID, $opthash{uniqueid}, int(rand(1000000)) + 4000000; -assign_default $designsize, $opthash{designsize}; -die "${progname}: a design size must be specified in $mffile or on the command line\n" if !defined $designsize; -die "${progname}: the design size must be a positive number\n" if $designsize<=0.0; -assign_default $underlinepos, $opthash{underpos}, -1; -$underlinepos = round(1000*$underlinepos/$designsize); -assign_default $underlinethick, $opthash{underthick}, 0.5; -$underlinethick = round(1000*$underlinethick/$designsize); -assign_default $fullname, $opthash{fullname}, $filebase; -assign_default $familyname, $opthash{family}, $fullname; -assign_default $italicangle, $opthash{italicangle}, 0; -assign_default $fontname, $opthash{name}, "$familyname-$weight"; -$fontname =~ s/\s//g; -assign_default $encoding, $opthash{encoding}, "standard"; -my $encoding_name = $encoding; -ENCODING: -{ - if (-e $encoding) { - # Filenames take precedence over built-in encodings. - my @enc_array; - open (ENCFILE, "<$encoding") || die "${progname}: $! 
($encoding)\n"; - while (my $oneline = ) { - $oneline =~ s/\%.*$//; - foreach my $word (split " ", $oneline) { - push @enc_array, substr($word, 1) if substr($word, 0, 1) eq "/"; - } - } - close ENCFILE; - $encoding_name = substr (shift @enc_array, 1); - $encoding = \@enc_array; - last ENCODING; - } - $encoding=\@standardencoding, last ENCODING if $encoding eq "standard"; - $encoding=\@isolatin1encoding, last ENCODING if $encoding eq "isolatin1"; - $encoding=\@ot1encoding, last ENCODING if $encoding eq "ot1"; - $encoding=\@t1encoding, last ENCODING if $encoding eq "t1"; - $encoding=\@glyphname, last ENCODING if $encoding eq "asis"; - warn "${progname}: Unknown encoding \"$encoding\"; using standard Adobe encoding\n"; - $encoding=\@standardencoding; # Default to standard encoding -} -assign_default $fixedpitch, $opthash{fixedpitch}, 0; -$fixedpitch = $fixedpitch ? "true" : "false"; -assign_default $ffscript, $opthash{ffscript}; - -# Output the final values of all of our parameters. -print "\n"; -print <<"PARAMVALUES"; -mf2pt1 is using the following font parameters: - font_version: $fontversion - font_comment: $comment - font_family: $familyname - font_weight: $weight - font_identifier: $fullname - font_fixed_pitch: $fixedpitch - font_slant: $italicangle - font_underline_position: $underlinepos - font_underline_thickness: $underlinethick - font_name: $fontname - font_unique_id: $uniqueID - font_size: $designsize (bp) - font_coding_scheme: $encoding_name -PARAMVALUES - ; -print "\n"; - -# Scale by a factor of 1000/design size. -$mag = 1000.0 / $designsize; -get_bboxes(0); -print "\n"; - -# Output the font in disassembled format. -open (OUTFILE, ">$pt1file") || die "${progname}: $! 
($pt1file)\n"; -output_header(); -printf OUTFILE "2 index /CharStrings %d dict dup begin\n", - 1+scalar(grep {defined($_)} @charbbox); -output_font_programs(); -output_trailer(); -close OUTFILE; -unlink @charfiles; -print "\n"; - -# Convert from the disassembled font format to Type 1 binary format. -if (!execute_command 0, ("t1asm", $pt1file, $pfbfile)) { - die "${progname}: You'll need either to install t1utils and rerun $progname or find another way to convert $pt1file to $pfbfile\n"; - exit 1; -} -print "\n"; -unlink $pt1file; - -# Use FontForge to autohint the result. -my $user_script = 0; # 1=script file was provided by the user; 0=created here -if (defined $ffscript) { - # The user provided his own script. - $user_script = 1; -} -else { - # Create a FontForge script file. - $ffscript = $filebase . ".pe"; - open (FFSCRIPT, ">$ffscript") || die "${progname}: $! ($ffscript)\n"; - print FFSCRIPT <<'AUTOHINT'; -Open($1); -SelectAll(); -RemoveOverlap(); -AddExtrema(); -Simplify(0, 2); -CorrectDirection(); -Simplify(0, 2); -RoundToInt(); -AutoHint(); -Generate($1); -Quit(0); -AUTOHINT - ; - close FFSCRIPT; -} -if (!execute_command 0, ("fontforge", "-script", $ffscript, $pfbfile)) { - warn "${progname}: You'll need to install FontForge if you want $pfbfile autohinted (not required, but strongly recommended)\n"; -} -unlink $ffscript if !$user_script; -print "\n"; - -# Finish up. -print "*** Successfully generated $pfbfile! 
***\n"; -exit 0; - -###################################################################### - -__END__ - -=head1 NAME - -mf2pt1 - produce a PostScript Type 1 font program from a Metafont source - - -=head1 SYNOPSIS - -mf2pt1 -[B<--help>] -[B<--version>] -[B<--comment>=I] -[B<--designsize>=I] -[B<--encoding>=I] -[B<--family>=I] -[B<-->[B]B] -[B<--fontversion>=I] -[B<--fullname>=I] -[B<--italicangle>=I] -[B<--name>=I] -[B<--underpos>=I] -[B<--underthick>=I] -[B<--uniqueid>=I] -[B<--weight>=I] -[B<--rounding>=I] -[B<--bpppix>=I] -[B<--ffscript>=I] -I.mf - - -=head1 WARNING - -The B Info file is the main source of documentation for -B. This man page is merely a brief summary. - - -=head1 DESCRIPTION - -B facilitates producing PostScript Type 1 fonts from a -Metafont source file. It is I, as the name may imply, an -automatic converter of arbitrary Metafont fonts to Type 1 format. -B imposes a number of restrictions on the Metafont input. If -these restrictions are met, B will produce valid Type 1 -output. (Actually, it produces "disassembled" Type 1; the B -program from the B suite will convert this to a true Type 1 -font.) - -=head2 Usage - - mf2pt1 myfont.mf - -=head1 OPTIONS - -Font parameters are best specified within a Metafont program. If -necessary, though, command-line options can override any of these -parameters. The B Info page, the primary source of B -documentation, describes the following in greater detail. - -=over 4 - -=item B<--help> - -Provide help on B's command-line options. - -=item B<--version> - -Output the B version number, copyright, and license. - -=item B<--comment>=I - -Include a font comment, usually a copyright notice. - -=item B<--designsize>=I - -Specify the font design size in points. - -=item B<--encoding>=I - -Designate the font encoding, either the name of a---typically -F<.enc>---file which contains a PostScript font-encoding vector or one -of C (the default), C, C, or C. - -=item B<--family>=I - -Specify the font family. 
- -=item B<--fixedpitch>, B<--nofixedpitch> - -Assert that the font uses either monospaced (B<--fixedpitch>) or -proportional (B<--nofixedpitch>) character widths. - -=item B<--fontversion>=I - -Specify the font's major and minor version number. - -=item B<--fullname>=I - -Designate the full font name (family plus modifiers). - -=item B<--italicangle>=I - -Designate the italic angle in degrees counterclockwise from vertical. - -=item B<--name>=I - -Provide the font name. - -=item B<--underpos>=I - -Specify the vertical position of the underline in thousandths of the -font height. - -=item B<--underthick>=I - -Specify the thickness of the underline in thousandths of the font -height. - -=item B<--uniqueid>=I - -Specify a globally unique font identifier. - -=item B<--weight>=I - -Provide a description of the font weight (e.g., ``Heavy''). - -=item B<--rounding>=I - -Specify the fraction of a font unit (0.0 < I <= 1.0) to which -to round coordinate values [default: 1.0]. - -=item B<--bpppix>=I - -Redefine the number of big points per pixel from 0.02 to I. - -=item B<--ffscript>=I - -Name a script to pass to FontForge. - -=back - - -=head1 FILES - -F (which is generated from F and F) - - -=head1 NOTES - -As stated in L, the complete source of documentation for -B is the Info page, not this man page. 
- - -=head1 SEE ALSO - -mf(1), mpost(1), t1asm(1), fontforge(1) - - -=head1 AUTHOR - -Scott Pakin, I diff --git a/buildscripts/mirrortree.py b/buildscripts/mirrortree.py deleted file mode 100644 index 0aa0bc8812..0000000000 --- a/buildscripts/mirrortree.py +++ /dev/null @@ -1,62 +0,0 @@ -#!@PYTHON@ - -import re -import os - -def new_link_path (link, dir, r): - l = link.split ('/') - d = dir.split ('/') - i = 0 - while i < len(d) and i < len(l) and l[i] == '..': - if r.match (d[i]): - del l[i] - else: - i += 1 - return '/'.join ([x for x in l if not r.match (x)]) - -def walk_tree (tree_roots = [], - process_dirs = '.*', - exclude_dirs = '', - find_files = '.*', - exclude_files = ''): - """Walk directory trees and.returns (dirs, symlinks, files, extra_files) tuple. - - Arguments: - tree_roots=DIRLIST use DIRLIST as tree roots list - process_dir=PATTERN only process files in directories named PATTERN - exclude_dir=PATTERN don't recurse into directories named PATTERN - find_files=PATTERN filters files which are hardlinked - exclude_files=PATTERN exclude files named PATTERN - """ - find_files_re = re.compile (find_files) - exclude_dirs_re = re.compile (exclude_dirs) - exclude_files_re = re.compile (exclude_files) - process_dirs_re = re.compile (process_dirs) - - dirs_paths = [] - symlinks_paths = [] - files_paths = [] - - for d in tree_roots: - for current_dir, dirs, files in os.walk(d): - i = 0 - while i < len(dirs): - if exclude_dirs_re.search (os.path.join (current_dir, dirs[i])): - del dirs[i] - else: - p = os.path.join (current_dir, dirs[i]) - if os.path.islink (p): - symlinks_paths.append (p) - i += 1 - if not process_dirs_re.search (current_dir): - continue - dirs_paths.append (current_dir) - for f in files: - if exclude_files_re.match (f): - continue - p = os.path.join (current_dir, f) - if os.path.islink (p): - symlinks_paths.append (p) - elif find_files_re.match (f): - files_paths.append (p) - return (dirs_paths, symlinks_paths, files_paths) diff --git 
a/buildscripts/musicxml_generate_intervals.py b/buildscripts/musicxml_generate_intervals.py deleted file mode 100644 index 3c00715d14..0000000000 --- a/buildscripts/musicxml_generate_intervals.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python - -notes = "CDEFGAB" -alterations = [-1, 0, 1] - -def print_note (octave, note, alteration): - print " \n \n %s" % notes[note] - if alteration <> 0: - print " %s" % alteration - print " %s\n \n 1\n 1\n quarter\n " % octave - - -print """ - - - Various piches and interval sizes - - - MusicXML Part - - - - - - - 1 - - 0 - major - - - - G - 2 - - -""" - -start_octave = 5 - -for octave in (start_octave, start_octave+1): - for note in (0,1,2,3,4,5,6): - for alteration in alterations: - if octave == start_octave and note == 0 and alteration == -1: - continue - print_note (octave, note, alteration) -# if octave == start_octave and note == 0 and alteration == 0: -# continue - print_note (start_octave-(octave-start_octave)-(1-(7-note)/7), (7-note)%7, -alteration) - -print """ - - -""" diff --git a/buildscripts/musicxml_generate_keys.py b/buildscripts/musicxml_generate_keys.py deleted file mode 100644 index 7a16ac987f..0000000000 --- a/buildscripts/musicxml_generate_keys.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python - -notes = "CDEFGAB" -alterations = [-1, 0, 1] - -def print_measure (nr, fifth, mode, atts1 = "", atts = "", final = ""): - print """ - -%s - %s - %s - -%s - - - C - 4 - - 2 - 1 - half - -%s """ % (nr, atts1, fifth, mode, atts, final) - -first_div = """ 1 -""" -first_atts = """ - - G - 2 - -""" - -final_barline = """ - light-heavy - -""" - -print """ - - - Different Key signatures - - - Various key signature: from 11 - flats to 11 sharps (each one first one measure in major, then one - measure in minor) - - - - - MusicXML Part - - - - """ - -max_range = 11 -measure = 0 -for fifth in range(-max_range, max_range+1): - measure += 1 - if fifth == -max_range: - print_measure (measure, fifth, "major", first_div, 
first_atts) - else: - print_measure (measure, fifth, "major") - measure += 1 - if fifth == max_range: - print_measure (measure, fifth, "minor", "", "", final_barline) - else: - print_measure (measure, fifth, "minor") - - -print """ -""" diff --git a/buildscripts/musicxml_generate_timesignatures.py b/buildscripts/musicxml_generate_timesignatures.py deleted file mode 100644 index c4cc78a103..0000000000 --- a/buildscripts/musicxml_generate_timesignatures.py +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/env python - -notes = "CDEFGAB" -alterations = [-1, 0, 1] - -dot_xml = """ -""" -tie_xml = """ -""" -tie_notation_xml = """ -""" - - -def generate_note (duration, end_tie = False): - if duration < 2: - (notetype, dur) = ("8th", 1) - elif duration < 4: - (notetype, dur) = ("quarter", 2) - elif duration < 8: - (notetype, dur) = ("half", 4) - else: - (notetype, dur) = ("whole", 8) - dur_processed = dur - dot = "" - if (duration - dur_processed >= dur/2): - dot = dot_xml - dur_processed += dur/2 - if (duration - dur_processed >= max(dur/4, 1)): - dot += dot_xml - dur_processed += dur/4 - tie = "" - tie_notation = "" - if end_tie: - tie += tie_xml % "stop" - tie_notation += tie_notation_xml % "stop" - second_note = None - if duration - dur_processed > 0: - second_note = generate_note (duration-dur_processed, True) - tie += tie_xml % "start" - tie_notation += tie_notation_xml % "start" - note = """ - - C - 5 - - %s -%s 1 - %s -%s%s """ % (dur_processed, tie, notetype, dot, tie_notation) - if second_note: - return "%s\n%s" % (note, second_note) - else: - return note - -def print_measure (nr, beats, type, params = "", attr = "", attr2 = "", barline = ""): - duration = 8*beats/type - note = generate_note (duration) - - print """ - -%s - %s - %s - -%s -%s -%s """ % (nr, attr, params, beats, type, attr2, note, barline) - -first_key = """ 2 - - 0 - major - -""" -first_clef = """ - G - 2 - -""" - -final_barline = """ - light-heavy - -""" - -print """ - - - - - Various time signatures: 
2/2 - (alla breve), 4/4 (C), 2/2, 3/2, 2/4, 3/4, 4/4, 5/4, 3/8, 6/8, - 12/8 - - - - - MusicXML Part - - - - """ - -measure = 1 - -print_measure (measure, 2, 2, " symbol=\"common\"", first_key, first_clef) -measure += 1 - -print_measure (measure, 4, 4, " symbol=\"common\"") -measure += 1 - -print_measure (measure, 2, 2) -measure += 1 - -print_measure (measure, 3, 2) -measure += 1 - -print_measure (measure, 2, 4) -measure += 1 - -print_measure (measure, 3, 4) -measure += 1 - -print_measure (measure, 4, 4) -measure += 1 - -print_measure (measure, 5, 4) -measure += 1 - -print_measure (measure, 3, 8) -measure += 1 - -print_measure (measure, 6, 8) -measure += 1 - -print_measure (measure, 12, 8, "", "", "", final_barline) -measure += 1 - -print """ -""" diff --git a/buildscripts/mutopia-index.py b/buildscripts/mutopia-index.py deleted file mode 100644 index 50e4ebbf29..0000000000 --- a/buildscripts/mutopia-index.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/env python -# mutopia-index.py - -import fnmatch -import getopt -import os -import re -import stat -import sys - -def find (pat, dir): - f = os.popen ('find %s -name "%s"'% (dir, pat)) - lst = [] - for a in f.readlines(): - a = a[:-1] - lst.append (a) - return lst - - -junk_prefix = 'out-www/' - -headertext= r""" - -

    LilyPond samples

    - - -

    You are looking at a page with some LilyPond samples. These files -are also included in the distribution. The output is completely -generated from the source file, without any further touch up. - -

    - -The pictures are 90 dpi anti-aliased snapshots of the printed output. -For a good impression of the quality print out the PDF file. -""" - -headertext_nopics= r""" -

    No examples were found in this directory. -""" - -# -# FIXME breaks on multiple strings. -# -def read_lilypond_header (fn): - s = open (fn).read () - s = re.sub ('%.*$', '', s) - s = re.sub ('\n', ' ', s) - - dict = {} - m = re.search (r"""\\header\s*{([^}]*)}""", s) - - if m: - s = m.group (1) - else: - return dict - - while s: - m = re.search (r'''\s*(\S+)\s*=\s*"([^"]+)"''', s) - if m == None: - s = '' - else: - s = s[m.end (0):] - left = m.group (1) - right = m.group (2) - - left = re.sub ('"', '', left) - right = re.sub ('"', '', right) - dict[left] = right - - return dict - -def help (): - sys.stdout.write (r'''Usage: mutopia-index [OPTIONS] INFILE OUTFILE -Generate index for mutopia. - -Options: - -h, --help print this help - -o, --output=FILE write output to file - -s, --subdirs=DIR add subdir - --suffix=SUF specify suffix - -''') - sys.exit (0) - -# ugh. -def gen_list (inputs, file_name): - sys.stderr.write ("generating HTML list %s" % file_name) - sys.stderr.write ('\n') - if file_name: - list = open (file_name, 'w') - else: - list = sys.stdout - list.write ('''Rendered Examples - - -''') - - list.write ('\n') - - if inputs: - list.write (headertext) - else: - list.write (headertext_nopics) - - for ex in inputs: - print ex - - (base, ext) = os.path.splitext (ex) - (base, ext2) = os.path.splitext (base) - ext = ext2 + ext - - header = read_lilypond_header (ex) - head = header.get ('title', os.path.basename (base)) - composer = header.get ('composer', '') - desc = header.get ('description', '') - list.write ('


    \n') - list.write ('

    %s

    \n' % head); - if composer: - list.write ('

    %s

    \n' % composer) - if desc: - list.write ('%s

    ' % desc) - list.write ('

      \n') - - def list_item (file_name, desc, type, lst = list): - if os.path.isfile (file_name): - lst.write ('
    • %s' - % (re.sub (junk_prefix, '', file_name), desc)) - - # FIXME: include warning if it uses \include - # files. - - size = os.stat (file_name)[stat.ST_SIZE] - kB = (size + 512) / 1024 - if kB: - lst.write (' (%s %d kB)' % (type, kB)) - else: - lst.write (' (%s %d characters)' - % (type, size)) - pictures = ['jpeg', 'png', 'xpm'] - lst.write ('\n') - else: - print "cannot find" , `file_name` - - list_item (base + ext, 'The input', 'ASCII') - - pages_found = 0 - for page in range (1, 100): - f = base + '-page%d.png' % page - - if not os.path.isfile (f): - break - pages_found += 1 - list_item (f, 'See a picture of page %d' % page, 'png') - - if pages_found == 0 and os.path.exists (base + '.png'): - list_item (base + ".png", - 'See a picture', 'png') - - - list_item (base + '.pdf', 'Print', 'PDF') - list_item (base + '.midi', 'Listen', 'MIDI') - list.write ('
    \n'); - - list.write ('\n'); - list.close () - -(options, files) = getopt.getopt (sys.argv[1:], - 'ho:', ['help', 'output=']) -outfile = 'examples.html' - -subdirs = [] -for (o, a) in options: - if o == '--help' or o == '-h': - help () - elif o == '--output' or o == '-o': - outfile = a - -dirs = [] -for f in files: - dirs += find ('out-www', f) - -if not dirs: - dirs = ['.'] - -allfiles = [] - -for d in dirs: - allfiles += find ('*.ly', d) - -allfiles = [f for f in allfiles - if not f.endswith ('snippet-map.ly') - and not re.search ('lily-[0-9a-f]+', f) - and 'musicxml' not in f] - -gen_list (allfiles, outfile) diff --git a/buildscripts/output-distance.py b/buildscripts/output-distance.py deleted file mode 100644 index 8586d2481a..0000000000 --- a/buildscripts/output-distance.py +++ /dev/null @@ -1,1262 +0,0 @@ -#!@TARGET_PYTHON@ -import sys -import optparse -import os -import math - -## so we can call directly as buildscripts/output-distance.py -me_path = os.path.abspath (os.path.split (sys.argv[0])[0]) -sys.path.insert (0, me_path + '/../python/') -sys.path.insert (0, me_path + '/../python/out/') - - -X_AXIS = 0 -Y_AXIS = 1 -INFTY = 1e6 - -OUTPUT_EXPRESSION_PENALTY = 1 -ORPHAN_GROB_PENALTY = 1 -options = None - -################################################################ -# system interface. -temp_dir = None -class TempDirectory: - def __init__ (self): - import tempfile - self.dir = tempfile.mkdtemp () - print 'dir is', self.dir - def __del__ (self): - print 'rm -rf %s' % self.dir - os.system ('rm -rf %s' % self.dir) - def __call__ (self): - return self.dir - - -def get_temp_dir (): - global temp_dir - if not temp_dir: - temp_dir = TempDirectory () - return temp_dir () - -def read_pipe (c): - print 'pipe' , c - return os.popen (c).read () - -def system (c): - print 'system' , c - s = os.system (c) - if s : - raise Exception ("failed") - return - -def shorten_string (s): - threshold = 15 - if len (s) > 2*threshold: - s = s[:threshold] + '..' 
+ s[-threshold:] - return s - -def max_distance (x1, x2): - dist = 0.0 - - for (p,q) in zip (x1, x2): - dist = max (abs (p-q), dist) - - return dist - - -def compare_png_images (old, new, dest_dir): - def png_dims (f): - m = re.search ('([0-9]+) x ([0-9]+)', read_pipe ('file %s' % f)) - - return tuple (map (int, m.groups ())) - - dest = os.path.join (dest_dir, new.replace ('.png', '.compare.jpeg')) - try: - dims1 = png_dims (old) - dims2 = png_dims (new) - except AttributeError: - ## hmmm. what to do? - system ('touch %(dest)s' % locals ()) - return - - dims = (min (dims1[0], dims2[0]), - min (dims1[1], dims2[1])) - - dir = get_temp_dir () - system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop1.png' % (dims + (old, dir))) - system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop2.png' % (dims + (new, dir))) - - system ('compare -depth 8 %(dir)s/crop1.png %(dir)s/crop2.png %(dir)s/diff.png' % locals ()) - - system ("convert -depth 8 %(dir)s/diff.png -blur 0x3 -negate -channel alpha,blue -type TrueColorMatte -fx 'intensity' %(dir)s/matte.png" % locals ()) - - system ("composite -compose atop -quality 65 %(dir)s/matte.png %(new)s %(dest)s" % locals ()) - - -################################################################ -# interval/bbox arithmetic. 
- -empty_interval = (INFTY, -INFTY) -empty_bbox = (empty_interval, empty_interval) - -def interval_is_empty (i): - return i[0] > i[1] - -def interval_length (i): - return max (i[1]-i[0], 0) - -def interval_union (i1, i2): - return (min (i1[0], i2[0]), - max (i1[1], i2[1])) - -def interval_intersect (i1, i2): - return (max (i1[0], i2[0]), - min (i1[1], i2[1])) - -def bbox_is_empty (b): - return (interval_is_empty (b[0]) - or interval_is_empty (b[1])) - -def bbox_union (b1, b2): - return (interval_union (b1[X_AXIS], b2[X_AXIS]), - interval_union (b2[Y_AXIS], b2[Y_AXIS])) - -def bbox_intersection (b1, b2): - return (interval_intersect (b1[X_AXIS], b2[X_AXIS]), - interval_intersect (b2[Y_AXIS], b2[Y_AXIS])) - -def bbox_area (b): - return interval_length (b[X_AXIS]) * interval_length (b[Y_AXIS]) - -def bbox_diameter (b): - return max (interval_length (b[X_AXIS]), - interval_length (b[Y_AXIS])) - - -def difference_area (a, b): - return bbox_area (a) - bbox_area (bbox_intersection (a,b)) - -class GrobSignature: - def __init__ (self, exp_list): - (self.name, self.origin, bbox_x, - bbox_y, self.output_expression) = tuple (exp_list) - - self.bbox = (bbox_x, bbox_y) - self.centroid = (bbox_x[0] + bbox_x[1], bbox_y[0] + bbox_y[1]) - - def __repr__ (self): - return '%s: (%.2f,%.2f), (%.2f,%.2f)\n' % (self.name, - self.bbox[0][0], - self.bbox[0][1], - self.bbox[1][0], - self.bbox[1][1]) - - def axis_centroid (self, axis): - return apply (sum, self.bbox[axis]) / 2 - - def centroid_distance (self, other, scale): - return max_distance (self.centroid, other.centroid) / scale - - def bbox_distance (self, other): - divisor = bbox_area (self.bbox) + bbox_area (other.bbox) - - if divisor: - return (difference_area (self.bbox, other.bbox) + - difference_area (other.bbox, self.bbox)) / divisor - else: - return 0.0 - - def expression_distance (self, other): - if self.output_expression == other.output_expression: - return 0 - else: - return 1 - 
-################################################################ -# single System. - -class SystemSignature: - def __init__ (self, grob_sigs): - d = {} - for g in grob_sigs: - val = d.setdefault (g.name, []) - val += [g] - - self.grob_dict = d - self.set_all_bbox (grob_sigs) - - def set_all_bbox (self, grobs): - self.bbox = empty_bbox - for g in grobs: - self.bbox = bbox_union (g.bbox, self.bbox) - - def closest (self, grob_name, centroid): - min_d = INFTY - min_g = None - try: - grobs = self.grob_dict[grob_name] - - for g in grobs: - d = max_distance (g.centroid, centroid) - if d < min_d: - min_d = d - min_g = g - - - return min_g - - except KeyError: - return None - def grobs (self): - return reduce (lambda x,y: x+y, self.grob_dict.values(), []) - -################################################################ -## comparison of systems. - -class SystemLink: - def __init__ (self, system1, system2): - self.system1 = system1 - self.system2 = system2 - - self.link_list_dict = {} - self.back_link_dict = {} - - - ## pairs - self.orphans = [] - - ## pair -> distance - self.geo_distances = {} - - ## pairs - self.expression_changed = [] - - self._geometric_distance = None - self._expression_change_count = None - self._orphan_count = None - - for g in system1.grobs (): - - ## skip empty bboxes. 
- if bbox_is_empty (g.bbox): - continue - - closest = system2.closest (g.name, g.centroid) - - self.link_list_dict.setdefault (closest, []) - self.link_list_dict[closest].append (g) - self.back_link_dict[g] = closest - - - def calc_geometric_distance (self): - total = 0.0 - for (g1,g2) in self.back_link_dict.items (): - if g2: - d = g1.bbox_distance (g2) - if d: - self.geo_distances[(g1,g2)] = d - - total += d - - self._geometric_distance = total - - def calc_orphan_count (self): - count = 0 - for (g1, g2) in self.back_link_dict.items (): - if g2 == None: - self.orphans.append ((g1, None)) - - count += 1 - - self._orphan_count = count - - def calc_output_exp_distance (self): - d = 0 - for (g1,g2) in self.back_link_dict.items (): - if g2: - d += g1.expression_distance (g2) - - self._expression_change_count = d - - def output_expression_details_string (self): - return ', '.join ([g1.name for g1 in self.expression_changed]) - - def geo_details_string (self): - results = [(d, g1,g2) for ((g1, g2), d) in self.geo_distances.items()] - results.sort () - results.reverse () - - return ', '.join (['%s: %f' % (g1.name, d) for (d, g1, g2) in results]) - - def orphan_details_string (self): - return ', '.join (['%s-None' % g1.name for (g1,g2) in self.orphans if g2==None]) - - def geometric_distance (self): - if self._geometric_distance == None: - self.calc_geometric_distance () - return self._geometric_distance - - def orphan_count (self): - if self._orphan_count == None: - self.calc_orphan_count () - - return self._orphan_count - - def output_expression_change_count (self): - if self._expression_change_count == None: - self.calc_output_exp_distance () - return self._expression_change_count - - def distance (self): - return (self.output_expression_change_count (), - self.orphan_count (), - self.geometric_distance ()) - -def read_signature_file (name): - print 'reading', name - - entries = open (name).read ().split ('\n') - def string_to_tup (s): - return tuple (map (float, 
s.split (' '))) - - def string_to_entry (s): - fields = s.split('@') - fields[2] = string_to_tup (fields[2]) - fields[3] = string_to_tup (fields[3]) - - return tuple (fields) - - entries = [string_to_entry (e) for e in entries - if e and not e.startswith ('#')] - - grob_sigs = [GrobSignature (e) for e in entries] - sig = SystemSignature (grob_sigs) - return sig - - -################################################################ -# different systems of a .ly file. - -hash_to_original_name = {} - -class FileLink: - def __init__ (self, f1, f2): - self._distance = None - self.file_names = (f1, f2) - - def text_record_string (self): - return '%-30f %-20s\n' % (self.distance (), - self.name () - + os.path.splitext (self.file_names[1])[1] - ) - - def calc_distance (self): - return 0.0 - - def distance (self): - if self._distance == None: - self._distance = self.calc_distance () - - return self._distance - - def source_file (self): - for ext in ('.ly', '.ly.txt'): - base = os.path.splitext (self.file_names[1])[0] - f = base + ext - if os.path.exists (f): - return f - - return '' - - def name (self): - base = os.path.basename (self.file_names[1]) - base = os.path.splitext (base)[0] - base = hash_to_original_name.get (base, base) - base = os.path.splitext (base)[0] - return base - - def extension (self): - return os.path.splitext (self.file_names[1])[1] - - def link_files_for_html (self, dest_dir): - for f in self.file_names: - link_file (f, os.path.join (dest_dir, f)) - - def get_distance_details (self): - return '' - - def get_cell (self, oldnew): - return '' - - def get_file (self, oldnew): - return self.file_names[oldnew] - - def html_record_string (self, dest_dir): - dist = self.distance() - - details = self.get_distance_details () - if details: - details_base = os.path.splitext (self.file_names[1])[0] - details_base += '.details.html' - fn = dest_dir + '/' + details_base - open_write_file (fn).write (details) - - details = '
    (details)' % locals () - - cell1 = self.get_cell (0) - cell2 = self.get_cell (1) - - name = self.name () + self.extension () - file1 = self.get_file (0) - file2 = self.get_file (1) - - return ''' - -%(dist)f -%(details)s - -%(cell1)s
    %(name)s -%(cell2)s
    %(name)s -''' % locals () - - -class FileCompareLink (FileLink): - def __init__ (self, f1, f2): - FileLink.__init__ (self, f1, f2) - self.contents = (self.get_content (self.file_names[0]), - self.get_content (self.file_names[1])) - - - def calc_distance (self): - ## todo: could use import MIDI to pinpoint - ## what & where changed. - - if self.contents[0] == self.contents[1]: - return 0.0 - else: - return 100.0; - - def get_content (self, f): - print 'reading', f - s = open (f).read () - return s - - -class GitFileCompareLink (FileCompareLink): - def get_cell (self, oldnew): - str = self.contents[oldnew] - - # truncate long lines - str = '\n'.join ([l[:80] for l in str.split ('\n')]) - - - str = '
    %s
    ' % str - return str - - def calc_distance (self): - if self.contents[0] == self.contents[1]: - d = 0.0 - else: - d = 1.0001 *options.threshold - - return d - - -class TextFileCompareLink (FileCompareLink): - def calc_distance (self): - import difflib - diff = difflib.unified_diff (self.contents[0].strip().split ('\n'), - self.contents[1].strip().split ('\n'), - fromfiledate = self.file_names[0], - tofiledate = self.file_names[1] - ) - - self.diff_lines = [l for l in diff] - self.diff_lines = self.diff_lines[2:] - - return math.sqrt (float (len ([l for l in self.diff_lines if l[0] in '-+']))) - - def get_cell (self, oldnew): - str = '' - if oldnew == 1: - str = '\n'.join ([d.replace ('\n','') for d in self.diff_lines]) - str = '
    %s
    ' % str - return str - -class LogFileCompareLink (TextFileCompareLink): - def get_content (self, f): - c = TextFileCompareLink.get_content (self, f) - c = re.sub ("\nProcessing `[^\n]+'\n", '', c) - return c - -class ProfileFileLink (FileCompareLink): - def __init__ (self, f1, f2): - FileCompareLink.__init__ (self, f1, f2) - self.results = [{}, {}] - - def get_cell (self, oldnew): - str = '' - for k in ('time', 'cells'): - if oldnew==0: - str += '%-8s: %d\n' % (k, int (self.results[oldnew][k])) - else: - str += '%-8s: %8d (%5.3f)\n' % (k, int (self.results[oldnew][k]), - self.get_ratio (k)) - - return '
    %s
    ' % str - - def get_ratio (self, key): - (v1,v2) = (self.results[0].get (key, -1), - self.results[1].get (key, -1)) - - if v1 <= 0 or v2 <= 0: - return 0.0 - - return (v1 - v2) / float (v1+v2) - - def calc_distance (self): - for oldnew in (0,1): - def note_info (m): - self.results[oldnew][m.group(1)] = float (m.group (2)) - - re.sub ('([a-z]+): ([-0-9.]+)\n', - note_info, self.contents[oldnew]) - - dist = 0.0 - factor = { - 'time': 0.1, - 'cells': 5.0, - } - - for k in ('time', 'cells'): - real_val = math.tan (self.get_ratio (k) * 0.5 * math.pi) - dist += math.exp (math.fabs (real_val) * factor[k]) - 1 - - dist = min (dist, 100) - return dist - - -class MidiFileLink (TextFileCompareLink): - def get_content (self, oldnew): - import midi - - data = FileCompareLink.get_content (self, oldnew) - midi = midi.parse (data) - tracks = midi[1] - - str = '' - j = 0 - for t in tracks: - str += 'track %d' % j - j += 1 - - for e in t: - ev_str = repr (e) - if re.search ('LilyPond [0-9.]+', ev_str): - continue - - str += ' ev %s\n' % `e` - return str - - - -class SignatureFileLink (FileLink): - def __init__ (self, f1, f2 ): - FileLink.__init__ (self, f1, f2) - self.system_links = {} - - def add_system_link (self, link, number): - self.system_links[number] = link - - def calc_distance (self): - d = 0.0 - - orphan_distance = 0.0 - for l in self.system_links.values (): - d = max (d, l.geometric_distance ()) - orphan_distance += l.orphan_count () - - return d + orphan_distance - - def add_file_compare (self, f1, f2): - system_index = [] - - def note_system_index (m): - system_index.append (int (m.group (1))) - return '' - - base1 = re.sub ("-([0-9]+).signature", note_system_index, f1) - base2 = re.sub ("-([0-9]+).signature", note_system_index, f2) - - self.base_names = (os.path.normpath (base1), - os.path.normpath (base2)) - - s1 = read_signature_file (f1) - s2 = read_signature_file (f2) - - link = SystemLink (s1, s2) - - self.add_system_link (link, system_index[0]) - - - def 
create_images (self, dest_dir): - - files_created = [[], []] - for oldnew in (0, 1): - pat = self.base_names[oldnew] + '.eps' - - for f in glob.glob (pat): - infile = f - outfile = (dest_dir + '/' + f).replace ('.eps', '.png') - data_option = '' - if options.local_data_dir: - data_option = ('-slilypond-datadir=%s/../share/lilypond/current ' - % os.path.dirname(infile)) - - mkdir (os.path.split (outfile)[0]) - cmd = ('gs -sDEVICE=png16m -dGraphicsAlphaBits=4 -dTextAlphaBits=4 ' - ' %(data_option)s ' - ' -r101 ' - ' -sOutputFile=%(outfile)s -dNOSAFER -dEPSCrop -q -dNOPAUSE ' - ' %(infile)s -c quit ') % locals () - - files_created[oldnew].append (outfile) - system (cmd) - - return files_created - - def link_files_for_html (self, dest_dir): - FileLink.link_files_for_html (self, dest_dir) - to_compare = [[], []] - - exts = [] - if options.create_images: - to_compare = self.create_images (dest_dir) - else: - exts += ['.png', '-page*png'] - - for ext in exts: - for oldnew in (0,1): - for f in glob.glob (self.base_names[oldnew] + ext): - dst = dest_dir + '/' + f - link_file (f, dst) - - if f.endswith ('.png'): - to_compare[oldnew].append (f) - - if options.compare_images: - for (old, new) in zip (to_compare[0], to_compare[1]): - compare_png_images (old, new, dest_dir) - - - def get_cell (self, oldnew): - def img_cell (ly, img, name): - if not name: - name = 'source' - else: - name = '%s' % name - - return ''' -
    - -
    -''' % locals () - def multi_img_cell (ly, imgs, name): - if not name: - name = 'source' - else: - name = '%s' % name - - imgs_str = '\n'.join ([''' - -
    ''' % (img, img) - for img in imgs]) - - - return ''' -%(imgs_str)s -''' % locals () - - - - def cell (base, name): - pat = base + '-page*.png' - pages = glob.glob (pat) - - if pages: - return multi_img_cell (base + '.ly', sorted (pages), name) - else: - return img_cell (base + '.ly', base + '.png', name) - - - - str = cell (os.path.splitext (self.file_names[oldnew])[0], self.name ()) - if options.compare_images and oldnew == 1: - str = str.replace ('.png', '.compare.jpeg') - - return str - - - def get_distance_details (self): - systems = self.system_links.items () - systems.sort () - - html = "" - for (c, link) in systems: - e = '%d' % c - for d in link.distance (): - e += '%f' % d - - e = '%s' % e - - html += e - - e = '%d' % c - for s in (link.output_expression_details_string (), - link.orphan_details_string (), - link.geo_details_string ()): - e += "%s" % s - - - e = '%s' % e - html += e - - original = self.name () - html = ''' - -comparison details for %(original)s - - - - - - - - - - -%(html)s -
    systemoutputorphangeo
    - - - -''' % locals () - return html - - -################################################################ -# Files/directories - -import glob -import re - -def compare_signature_files (f1, f2): - s1 = read_signature_file (f1) - s2 = read_signature_file (f2) - - return SystemLink (s1, s2).distance () - -def paired_files (dir1, dir2, pattern): - """ - Search DIR1 and DIR2 for PATTERN. - - Return (PAIRED, MISSING-FROM-2, MISSING-FROM-1) - - """ - - files = [] - for d in (dir1,dir2): - found = [os.path.split (f)[1] for f in glob.glob (d + '/' + pattern)] - found = dict ((f, 1) for f in found) - files.append (found) - - pairs = [] - missing = [] - for f in files[0]: - try: - files[1].pop (f) - pairs.append (f) - except KeyError: - missing.append (f) - - return (pairs, files[1].keys (), missing) - -class ComparisonData: - def __init__ (self): - self.result_dict = {} - self.missing = [] - self.added = [] - self.file_links = {} - - def read_sources (self): - - ## ugh: drop the .ly.txt - for (key, val) in self.file_links.items (): - - def note_original (match, ln=val): - key = ln.name () - hash_to_original_name[key] = match.group (1) - return '' - - sf = val.source_file () - if sf: - re.sub (r'\\sourcefilename "([^"]+)"', - note_original, open (sf).read ()) - else: - print 'no source for', val - - def compare_trees (self, dir1, dir2): - self.compare_directories (dir1, dir2) - - (root, dirs, files) = os.walk (dir1).next () - for d in dirs: - d1 = os.path.join (dir1, d) - d2 = os.path.join (dir2, d) - - if os.path.islink (d1) or os.path.islink (d2): - continue - - if os.path.isdir (d2): - self.compare_trees (d1, d2) - - def compare_directories (self, dir1, dir2): - for ext in ['signature', - 'midi', - 'log', - 'profile', - 'gittxt']: - (paired, m1, m2) = paired_files (dir1, dir2, '*.' 
+ ext) - - self.missing += [(dir1, m) for m in m1] - self.added += [(dir2, m) for m in m2] - - for p in paired: - if (options.max_count - and len (self.file_links) > options.max_count): - continue - - f2 = dir2 + '/' + p - f1 = dir1 + '/' + p - self.compare_files (f1, f2) - - def compare_files (self, f1, f2): - if f1.endswith ('signature'): - self.compare_signature_files (f1, f2) - else: - ext = os.path.splitext (f1)[1] - klasses = { - '.midi': MidiFileLink, - '.log' : LogFileCompareLink, - '.profile': ProfileFileLink, - '.gittxt': GitFileCompareLink, - } - - if klasses.has_key (ext): - self.compare_general_files (klasses[ext], f1, f2) - - def compare_general_files (self, klass, f1, f2): - name = os.path.split (f1)[1] - - file_link = klass (f1, f2) - self.file_links[name] = file_link - - def compare_signature_files (self, f1, f2): - name = os.path.split (f1)[1] - name = re.sub ('-[0-9]+.signature', '', name) - - file_link = None - try: - file_link = self.file_links[name] - except KeyError: - generic_f1 = re.sub ('-[0-9]+.signature', '.ly', f1) - generic_f2 = re.sub ('-[0-9]+.signature', '.ly', f2) - file_link = SignatureFileLink (generic_f1, generic_f2) - self.file_links[name] = file_link - - file_link.add_file_compare (f1, f2) - - def write_changed (self, dest_dir, threshold): - (changed, below, unchanged) = self.thresholded_results (threshold) - - str = '\n'.join ([os.path.splitext (link.file_names[1])[0] - for link in changed]) - fn = dest_dir + '/changed.txt' - - open_write_file (fn).write (str) - - def thresholded_results (self, threshold): - ## todo: support more scores. 
- results = [(link.distance(), link) - for link in self.file_links.values ()] - results.sort () - results.reverse () - - unchanged = [r for (d,r) in results if d == 0.0] - below = [r for (d,r) in results if threshold >= d > 0.0] - changed = [r for (d,r) in results if d > threshold] - - return (changed, below, unchanged) - - def write_text_result_page (self, filename, threshold): - out = None - if filename == '': - out = sys.stdout - else: - print 'writing "%s"' % filename - out = open_write_file (filename) - - (changed, below, unchanged) = self.thresholded_results (threshold) - - - for link in changed: - out.write (link.text_record_string ()) - - out.write ('\n\n') - out.write ('%d below threshold\n' % len (below)) - out.write ('%d unchanged\n' % len (unchanged)) - - def create_text_result_page (self, dir1, dir2, dest_dir, threshold): - self.write_text_result_page (dest_dir + '/index.txt', threshold) - - def create_html_result_page (self, dir1, dir2, dest_dir, threshold): - dir1 = dir1.replace ('//', '/') - dir2 = dir2.replace ('//', '/') - - (changed, below, unchanged) = self.thresholded_results (threshold) - - - html = '' - old_prefix = os.path.split (dir1)[1] - for link in changed: - html += link.html_record_string (dest_dir) - - - short_dir1 = shorten_string (dir1) - short_dir2 = shorten_string (dir2) - html = ''' - - - - - - -%(html)s -
    distance%(short_dir1)s%(short_dir2)s
    -''' % locals() - - html += ('

    ') - below_count = len (below) - - if below_count: - html += ('

    %d below threshold

    ' % below_count) - - html += ('

    %d unchanged

    ' % len (unchanged)) - - dest_file = dest_dir + '/index.html' - open_write_file (dest_file).write (html) - - - for link in changed: - link.link_files_for_html (dest_dir) - - - def print_results (self, threshold): - self.write_text_result_page ('', threshold) - -def compare_trees (dir1, dir2, dest_dir, threshold): - data = ComparisonData () - data.compare_trees (dir1, dir2) - data.read_sources () - - - data.print_results (threshold) - - if os.path.isdir (dest_dir): - system ('rm -rf %s '% dest_dir) - - data.write_changed (dest_dir, threshold) - data.create_html_result_page (dir1, dir2, dest_dir, threshold) - data.create_text_result_page (dir1, dir2, dest_dir, threshold) - -################################################################ -# TESTING - -def mkdir (x): - if not os.path.isdir (x): - print 'mkdir', x - os.makedirs (x) - -def link_file (x, y): - mkdir (os.path.split (y)[0]) - try: - print x, '->', y - os.link (x, y) - except OSError, z: - print 'OSError', x, y, z - raise OSError - -def open_write_file (x): - d = os.path.split (x)[0] - mkdir (d) - return open (x, 'w') - - -def system (x): - - print 'invoking', x - stat = os.system (x) - assert stat == 0 - - -def test_paired_files (): - print paired_files (os.environ["HOME"] + "/src/lilypond/scripts/", - os.environ["HOME"] + "/src/lilypond-stable/buildscripts/", '*.py') - - -def test_compare_trees (): - system ('rm -rf dir1 dir2') - system ('mkdir dir1 dir2') - system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir1') - system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir2') - system ('cp 20expr{-*.signature,.ly,.png,.eps,.log,.profile} dir1') - system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir2/') - system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir1/') - system ('cp 19-1.signature 19.sub-1.signature') - system ('cp 19.ly 19.sub.ly') - system ('cp 19.profile 19.sub.profile') - system ('cp 19.log 19.sub.log') - system ('cp 19.png 19.sub.png') - system ('cp 19.eps 
19.sub.eps') - - system ('cp 20multipage* dir1') - system ('cp 20multipage* dir2') - system ('cp 19multipage-1.signature dir2/20multipage-1.signature') - - - system ('mkdir -p dir1/subdir/ dir2/subdir/') - system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir1/subdir/') - system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir2/subdir/') - system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir2/') - system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir1/') - system ('echo HEAD is 1 > dir1/tree.gittxt') - system ('echo HEAD is 2 > dir2/tree.gittxt') - - ## introduce differences - system ('cp 19-1.signature dir2/20-1.signature') - system ('cp 19.profile dir2/20.profile') - system ('cp 19.png dir2/20.png') - system ('cp 19multipage-page1.png dir2/20multipage-page1.png') - system ('cp 20-1.signature dir2/subdir/19.sub-1.signature') - system ('cp 20.png dir2/subdir/19.sub.png') - system ("sed 's/: /: 1/g' 20.profile > dir2/subdir/19.sub.profile") - - ## radical diffs. 
- system ('cp 19-1.signature dir2/20grob-1.signature') - system ('cp 19-1.signature dir2/20grob-2.signature') - system ('cp 19multipage.midi dir1/midi-differ.midi') - system ('cp 20multipage.midi dir2/midi-differ.midi') - system ('cp 19multipage.log dir1/log-differ.log') - system ('cp 19multipage.log dir2/log-differ.log && echo different >> dir2/log-differ.log && echo different >> dir2/log-differ.log') - - compare_trees ('dir1', 'dir2', 'compare-dir1dir2', options.threshold) - - -def test_basic_compare (): - ly_template = r""" - -\version "2.10.0" -#(define default-toplevel-book-handler - print-book-with-defaults-as-systems ) - -#(ly:set-option (quote no-point-and-click)) - -\sourcefilename "my-source.ly" - -%(papermod)s -\header { tagline = ##f } -\score { -<< -\new Staff \relative c { - c4^"%(userstring)s" %(extragrob)s - } -\new Staff \relative c { - c4^"%(userstring)s" %(extragrob)s - } ->> -\layout{} -} - -""" - - dicts = [{ 'papermod' : '', - 'name' : '20', - 'extragrob': '', - 'userstring': 'test' }, - { 'papermod' : '#(set-global-staff-size 19.5)', - 'name' : '19', - 'extragrob': '', - 'userstring': 'test' }, - { 'papermod' : '', - 'name' : '20expr', - 'extragrob': '', - 'userstring': 'blabla' }, - { 'papermod' : '', - 'name' : '20grob', - 'extragrob': 'r2. 
\\break c1', - 'userstring': 'test' }, - ] - - for d in dicts: - open (d['name'] + '.ly','w').write (ly_template % d) - - names = [d['name'] for d in dicts] - - system ('lilypond -ddump-profile -dseparate-log-files -ddump-signatures --png -dbackend=eps ' + ' '.join (names)) - - - multipage_str = r''' - #(set-default-paper-size "a6") - \score { - \relative {c1 \pageBreak c1 } - \layout {} - \midi {} - } - ''' - - open ('20multipage.ly', 'w').write (multipage_str.replace ('c1', 'd1')) - open ('19multipage.ly', 'w').write ('#(set-global-staff-size 19.5)\n' + multipage_str) - system ('lilypond -dseparate-log-files -ddump-signatures --png 19multipage 20multipage ') - - test_compare_signatures (names) - -def test_compare_signatures (names, timing=False): - import time - - times = 1 - if timing: - times = 100 - - t0 = time.clock () - - count = 0 - for t in range (0, times): - sigs = dict ((n, read_signature_file ('%s-1.signature' % n)) for n in names) - count += 1 - - if timing: - print 'elapsed', (time.clock() - t0)/count - - - t0 = time.clock () - count = 0 - combinations = {} - for (n1, s1) in sigs.items(): - for (n2, s2) in sigs.items(): - combinations['%s-%s' % (n1, n2)] = SystemLink (s1,s2).distance () - count += 1 - - if timing: - print 'elapsed', (time.clock() - t0)/count - - results = combinations.items () - results.sort () - for k,v in results: - print '%-20s' % k, v - - assert combinations['20-20'] == (0.0,0.0,0.0) - assert combinations['20-20expr'][0] > 0.0 - assert combinations['20-19'][2] < 10.0 - assert combinations['20-19'][2] > 0.0 - - -def run_tests (): - dir = 'test-output-distance' - - do_clean = not os.path.exists (dir) - - print 'test results in ', dir - if do_clean: - system ('rm -rf ' + dir) - system ('mkdir ' + dir) - - os.chdir (dir) - if do_clean: - test_basic_compare () - - test_compare_trees () - -################################################################ -# - -def main (): - p = optparse.OptionParser ("output-distance - compare LilyPond 
formatting runs") - p.usage = 'output-distance.py [options] tree1 tree2' - - p.add_option ('', '--test-self', - dest="run_test", - action="store_true", - help='run test method') - - p.add_option ('--max-count', - dest="max_count", - metavar="COUNT", - type="int", - default=0, - action="store", - help='only analyze COUNT signature pairs') - - p.add_option ('', '--threshold', - dest="threshold", - default=0.3, - action="store", - type="float", - help='threshold for geometric distance') - - p.add_option ('--no-compare-images', - dest="compare_images", - default=True, - action="store_false", - help="Don't run graphical comparisons") - - p.add_option ('--create-images', - dest="create_images", - default=False, - action="store_true", - help="Create PNGs from EPSes") - - - p.add_option ('--local-datadir', - dest="local_data_dir", - default=False, - action="store_true", - help='whether to use the share/lilypond/ directory in the test directory') - - p.add_option ('-o', '--output-dir', - dest="output_dir", - default=None, - action="store", - type="string", - help='where to put the test results [tree2/compare-tree1tree2]') - - global options - (options, args) = p.parse_args () - - if options.run_test: - run_tests () - sys.exit (0) - - if len (args) != 2: - p.print_usage() - sys.exit (2) - - name = options.output_dir - if not name: - name = args[0].replace ('/', '') - name = os.path.join (args[1], 'compare-' + shorten_string (name)) - - compare_trees (args[0], args[1], name, options.threshold) - -if __name__ == '__main__': - main() - diff --git a/buildscripts/pfx2ttf.fontforge b/buildscripts/pfx2ttf.fontforge deleted file mode 100644 index 6428c7cf86..0000000000 --- a/buildscripts/pfx2ttf.fontforge +++ /dev/null @@ -1,31 +0,0 @@ -#!@FONTFORGE@ - -Open($1); -MergeKern($2) - - -# The AFM files of `New Century Schoolbook' family as distributed within the -# urw-fonts-1.0.7pre41.tar.bz2 archive contain a bunch of ligatures which -# shouldn't be active by default: -# -# T + M -> 
trademark -# N + o -> afii61352 -# i + j -> ij -# I + J -> IJ -# -# This font bundle is shipped by Fedora Core 6 and other GNU/Linux -# distributions; we simply remove those ligatures. - -SelectIf("trademark", "trademark", \ - "afii61352", "afii61352", \ - "ij", "ij", \ - "IJ", "IJ"); -if (Strtol($version) < 20070501) - RemoveATT("Ligature", "*", "*"); -else - RemovePosSub("*"); -endif - -Generate($3 + $fontname + ".otf"); - -# EOF diff --git a/buildscripts/postprocess_html.py b/buildscripts/postprocess_html.py deleted file mode 100644 index e94da79755..0000000000 --- a/buildscripts/postprocess_html.py +++ /dev/null @@ -1,361 +0,0 @@ -#!@PYTHON@ - -""" -Postprocess HTML files: -add footer, tweak links, add language selection menu. -""" -import re -import os -import time -import operator - -import langdefs - -# This is to try to make the docball not too big with almost duplicate files -# see process_links() -non_copied_pages = ['Documentation/user/out-www/lilypond-big-page', - 'Documentation/user/out-www/lilypond-internals-big-page', - 'Documentation/user/out-www/lilypond-learning-big-page', - 'Documentation/user/out-www/lilypond-program-big-page', - 'Documentation/user/out-www/music-glossary-big-page', - 'out-www/examples', - 'Documentation/topdocs', - 'Documentation/bibliography', - 'Documentation/out-www/THANKS', - 'Documentation/out-www/DEDICATION', - 'Documentation/out-www/devel', - 'input/'] - -def _doc (s): - return s - -header = r""" -""" - -footer = ''' - -''' -footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).') -# ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process -footer_report_links = _doc ('Your suggestions for the documentation are welcome, please report errors to our bug list.') - - -mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs' -suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding' 
- -header_tag = '' -header_tag_re = re.compile (header_tag) - -footer_tag = '' -footer_tag_re = re.compile (footer_tag) - -lang_available = _doc ("Other languages: %s.") -browser_lang = _doc ('About automatic language selection.') -browser_language_url = "/web/about/browser-language" - -LANGUAGES_TEMPLATE = ''' -

    - %(language_available)s -
    - %(browser_language)s -

    -''' - - -html_re = re.compile ('(.*?)(?:[.]([^/.]*))?[.]html$') -pages_dict = {} - -def build_pages_dict (filelist): - """Build dictionary of available translations of each page""" - global pages_dict - for f in filelist: - m = html_re.match (f) - if m: - g = m.groups() - if len (g) <= 1 or g[1] == None: - e = '' - else: - e = g[1] - if not g[0] in pages_dict: - pages_dict[g[0]] = [e] - else: - pages_dict[g[0]].append (e) - -def source_links_replace (m, source_val): - return 'href="' + os.path.join (source_val, m.group (1)) + '"' - -splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|\ -Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|\ -lilypond-learning))/') - -snippets_ref_re = re.compile (r'href="(\.\./)?lilypond-snippets') -user_ref_re = re.compile ('href="(?:\.\./)?lilypond\ -(-internals|-learning|-program|(?!-snippets))') - -docindex_link_re = re.compile (r'href="index.html"') - - -## Windows does not support symlinks. -# This function avoids creating symlinks for splitted HTML manuals -# Get rid of symlinks in GNUmakefile.in (local-WWW-post) -# this also fixes missing PNGs only present in translated docs -def hack_urls (s, prefix): - if splitted_docs_re.match (prefix): - s = re.sub ('(href|src)="(../lily-.*?|.*?[.]png)"', '\\1="../\\2"', s) - - # fix xrefs between documents in different directories ad hoc - if 'user/out-www/lilypond' in prefix: - s = snippets_ref_re.sub ('href="source/input/lsr/lilypond-snippets', s) - elif 'input/lsr' in prefix: - s = user_ref_re.sub ('href="source/Documentation/user/lilypond\\1', s) - - # we also need to replace in the lsr, which is already processed above! 
- if 'input/' in prefix or 'Documentation/topdocs' in prefix: - # fix the link from the regtest, lsr and topdoc pages to the doc index - # (rewrite prefix to obtain the relative path of the doc index page) - rel_link = re.sub (r'out-www/.*$', '', prefix) - rel_link = re.sub (r'[^/]*/', '../', rel_link) - if 'input/regression' in prefix: - indexfile = "Documentation/devel" - else: - indexfile = "index" - s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s) - - source_path = os.path.join (os.path.dirname (prefix), 'source') - if not os.path.islink (source_path): - return s - source_val = os.readlink (source_path) - return re.sub ('href="source/(.*?)"', lambda m: source_links_replace (m, source_val), s) - -body_tag_re = re.compile ('(?i)]*)>') -html_tag_re = re.compile ('(?i)') -doctype_re = re.compile ('(?i)\n' -css_re = re.compile ('(?i)]*)href="[^">]*?lilypond.*\.css"([^>]*)>') -end_head_tag_re = re.compile ('(?i)') -css_link = """ - - - -""" - - -def add_header (s, prefix): - """Add header (, doctype and CSS)""" - if header_tag_re.search (s) == None: - body = '' - (s, n) = body_tag_re.subn (body + header, s, 1) - if not n: - (s, n) = html_tag_re.subn ('' + header, s, 1) - if not n: - s = header + s - - s = header_tag + '\n' + s - - if doctype_re.search (s) == None: - s = doctype + s - - if css_re.search (s) == None: - depth = (prefix.count ('/') - 1) * '../' - s = end_head_tag_re.sub ((css_link % {'rel': depth}) + '', s) - return s - -title_tag_re = re.compile ('.*?(.*?)', re.DOTALL) -AT_web_title_re = re.compile ('@WEB-TITLE@') - -def add_title (s): - # urg - # maybe find first node? 
- fallback_web_title = '-- --' - m = title_tag_re.match (s) - if m: - fallback_web_title = m.group (1) - s = AT_web_title_re.sub (fallback_web_title, s) - return s - -footer_insert_re = re.compile ('') -end_body_re = re.compile ('(?i)') -end_html_re = re.compile ('(?i)') - -def add_footer (s, footer_text): - """add footer""" - (s, n) = footer_insert_re.subn (footer_text + '\n' + '', s, 1) - if not n: - (s, n) = end_body_re.subn (footer_text + '\n' + '', s, 1) - if not n: - (s, n) = end_html_re.subn (footer_text + '\n' + '', s, 1) - if not n: - s += footer_text + '\n' - return s - -def find_translations (prefix, lang_ext): - """find available translations of a page""" - available = [] - missing = [] - for l in langdefs.LANGUAGES: - e = l.webext - if lang_ext != e: - if e in pages_dict[prefix]: - available.append (l) - elif lang_ext == '' and l.enabled and reduce (operator.and_, - [not prefix.startswith (s) - for s in non_copied_pages]): - # English version of missing translated pages will be written - missing.append (e) - return available, missing - -online_links_re = re.compile ('''(href|src)=['"]\ -((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\ -([.]html)(#[^"']*|)['"]''') -offline_links_re = re.compile ('href=[\'"]\ -((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]') -big_page_name_re = re.compile ('''(.+?)-big-page''') - -def process_i18n_big_page_links (match, prefix, lang_ext): - big_page_name = big_page_name_re.match (match.group (1)) - if big_page_name: - destination_path = os.path.normpath (os.path.join (os.path.dirname (prefix), - big_page_name.group (0))) - if not lang_ext in pages_dict[destination_path]: - return match.group (0) - return 'href="' + match.group (1) + '.' + lang_ext \ - + match.group (2) + match.group (3) + '"' - -def process_links (s, prefix, lang_ext, file_name, missing, target): - page_flavors = {} - if target == 'online': - # Strip .html, suffix for auto language selection (content - # negotiation). 
The menu must keep the full extension, so do - # this before adding the menu. - page_flavors[file_name] = \ - [lang_ext, online_links_re.sub ('\\1="\\2\\4"', s)] - elif target == 'offline': - # in LANG doc index: don't rewrite .html suffixes - # as not all .LANG.html pages exist; - # the doc index should be translated and contain links with the right suffixes - if prefix == 'Documentation/out-www/index': - page_flavors[file_name] = [lang_ext, s] - elif lang_ext == '': - page_flavors[file_name] = [lang_ext, s] - for e in missing: - page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = \ - [e, offline_links_re.sub ('href="\\1.' + e + '\\2\\3"', s)] - else: - # For saving bandwidth and disk space, we don't duplicate big pages - # in English, so we must process translated big pages links differently. - if 'big-page' in prefix: - page_flavors[file_name] = \ - [lang_ext, - offline_links_re.sub \ - (lambda match: process_i18n_big_page_links (match, prefix, lang_ext), - s)] - else: - page_flavors[file_name] = \ - [lang_ext, - offline_links_re.sub ('href="\\1.' 
+ lang_ext + '\\2\\3"', s)] - return page_flavors - -def add_menu (page_flavors, prefix, available, target, translation): - for k in page_flavors: - language_menu = '' - languages = '' - if page_flavors[k][0] != '': - t = translation[page_flavors[k][0]] - else: - t = _doc - for lang in available: - lang_file = lang.file_name (os.path.basename (prefix), '.html') - if language_menu != '': - language_menu += ', ' - language_menu += '%s' % (lang_file, t (lang.name)) - if target == 'offline': - browser_language = '' - elif target == 'online': - browser_language = t (browser_lang) % browser_language_url - if language_menu: - language_available = t (lang_available) % language_menu - languages = LANGUAGES_TEMPLATE % vars () - page_flavors[k][1] = add_footer (page_flavors[k][1], languages) - return page_flavors - - -def process_html_files (package_name = '', - package_version = '', - target = 'offline', - name_filter = lambda s: s): - """Add header, footer and tweak links to a number of HTML files - - Arguments: - package_name=NAME set package_name to NAME - package_version=VERSION set package version to VERSION - targets=offline|online set page processing depending on the target - offline is for reading HTML pages locally - online is for hosting the HTML pages on a website with content - negotiation - name_filter a HTML file name filter - """ - translation = langdefs.translation - localtime = time.strftime ('%c %Z', time.localtime (time.time ())) - - if "http://" in mail_address: - mail_address_url = mail_address - else: - mail_address_url= 'mailto:' + mail_address - - versiontup = package_version.split ('.') - branch_str = _doc ('stable-branch') - if int (versiontup[1]) % 2: - branch_str = _doc ('development-branch') - - # Initialize dictionaries for string formatting - subst = {} - subst[''] = dict ([i for i in globals ().items() if type (i[1]) is str]) - subst[''].update (dict ([i for i in locals ().items() if type (i[1]) is str])) - for l in translation: - e = 
langdefs.LANGDICT[l].webext - if e: - subst[e] = {} - for name in subst['']: - subst[e][name] = translation[l] (subst[''][name]) - # Do deeper string formatting as early as possible, - # so only one '%' formatting pass is needed later - for e in subst: - subst[e]['footer_name_version'] = subst[e]['footer_name_version'] % subst[e] - subst[e]['footer_report_links'] = subst[e]['footer_report_links'] % subst[e] - - for prefix, ext_list in pages_dict.items (): - for lang_ext in ext_list: - file_name = langdefs.lang_file_name (prefix, lang_ext, '.html') - in_f = open (file_name) - s = in_f.read() - in_f.close() - - s = s.replace ('%', '%%') - s = hack_urls (s, prefix) - s = add_header (s, prefix) - - ### add footer - if footer_tag_re.search (s) == None: - s = add_footer (s, footer_tag + footer) - - available, missing = find_translations (prefix, lang_ext) - page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target) - # Add menu after stripping: must not have autoselection for language menu. - page_flavors = add_menu (page_flavors, prefix, available, target, translation) - for k in page_flavors: - page_flavors[k][1] = page_flavors[k][1] % subst[page_flavors[k][0]] - out_f = open (name_filter (k), 'w') - out_f.write (page_flavors[k][1]) - out_f.close() - # if the page is translated, a .en.html symlink is necessary for content negotiation - if target == 'online' and ext_list != ['']: - os.symlink (os.path.basename (prefix) + '.html', name_filter (prefix + '.en.html')) diff --git a/buildscripts/pytt.py b/buildscripts/pytt.py deleted file mode 100644 index 1026c02bf7..0000000000 --- a/buildscripts/pytt.py +++ /dev/null @@ -1,24 +0,0 @@ -#! 
@PYTHON@ - -import os -import re -import sys - -frm = re.compile (sys.argv[1], re.MULTILINE) -to = sys.argv[2] - -if not sys.argv[3:] or sys.argv[3] == '-': - sys.stdout.write (re.sub (frm, to, sys.stdin.read ())) -for file in sys.argv[3:]: - s = open (file).read () - name = os.path.basename (file) - base, ext = os.path.splitext (name) - t = re.sub (frm, to % locals (), s) - if s != t: - if 1: - os.system ('mv %(file)s %(file)s~~' % locals ()) - h = open (file, "w") - h.write (t) - h.close () - else: - sys.stdout.write (t) diff --git a/buildscripts/readlink.py b/buildscripts/readlink.py deleted file mode 100644 index 70267ffa59..0000000000 --- a/buildscripts/readlink.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python -import os -import sys - -for i in sys.argv[1:]: - print os.path.realpath (i) diff --git a/buildscripts/tely-gettext.py b/buildscripts/tely-gettext.py deleted file mode 100644 index b4e566044f..0000000000 --- a/buildscripts/tely-gettext.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# tely-gettext.py - -# Temporary script that helps translated docs sources conversion -# for texi2html processing - -# USAGE: tely-gettext.py BUILDSCRIPT-DIR LOCALEDIR LANG FILES - -print "tely_gettext.py" - -import sys -import re -import os -import gettext - -if len (sys.argv) > 3: - buildscript_dir, localedir, lang = sys.argv[1:4] -else: - print """USAGE: tely-gettext.py BUILDSCRIPT-DIR LOCALEDIR LANG FILES - For example buildscripts/tely-gettext.py buildscripts Documentation/po/out-www de Documentation/de/user/*.tely""" - sys.exit (1) - -sys.path.append (buildscript_dir) -import langdefs - -double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep -t = gettext.translation('lilypond-doc', localedir, [lang]) -_doc = t.gettext - -include_re = re.compile (r'@include (.*?)$', re.M) -whitespaces = re.compile (r'\s+') -ref_re = re.compile (r'(?ms)@(ruser|rprogram|ref|rlearning)\{(.*?)\}') -node_section_re = re.compile 
(r'@node (.*?)\n@((?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) (.*?)\n') -menu_entry_re = re.compile (r'\* (.*?)::') - -def ref_gettext (m): - r = whitespaces.sub (' ', m.group (2)) - return '@' + m.group (1) + '{' + _doc (r) + '}' - -def node_gettext (m): - return '@node ' + _doc (m.group (1)) + '\n@' + \ - m.group (2) + ' ' + _doc (m.group (3)) + \ - '\n@translationof ' + m.group (1) + '\n' - -def menu_entry_gettext (m): - return '* ' + _doc (m.group (1)) + '::' - -def process_file (filename): - print "Processing %s" % filename - f = open (filename, 'r') - page = f.read () - f.close() - page = node_section_re.sub (node_gettext, page) - page = ref_re.sub (ref_gettext, page) - page = menu_entry_re.sub (menu_entry_gettext, page) - page = page.replace ("""-- SKELETON FILE -- -When you actually translate this file, please remove these lines as -well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", """@c -- SKELETON FILE --""") - page = page.replace ('UNTRANSLATED NODE: IGNORE ME', "@c UNTRANSLATED NODE: IGNORE ME") - includes = [whitespaces.sub ('', f) for f in include_re.findall (page)] - f = open (filename, 'w') - f.write (page) - f.close () - dir = os.path.dirname (filename) - for file in includes: - p = os.path.join (dir, file) - if os.path.exists (p): - process_file (p) - -for filename in sys.argv[4:]: - process_file (filename) diff --git a/buildscripts/texi-gettext.py b/buildscripts/texi-gettext.py deleted file mode 100644 index 546819b155..0000000000 --- a/buildscripts/texi-gettext.py +++ /dev/null @@ -1,77 +0,0 @@ -#!@PYTHON@ -# -*- coding: utf-8 -*- -# texi-gettext.py - -# USAGE: texi-gettext.py [-o OUTDIR] LANG FILES -# -# -o OUTDIR specifies that output files should rather be written in OUTDIR -# - -print "texi_gettext.py" - -import sys -import re -import os -import getopt - -import langdefs - -optlist, args = getopt.getopt (sys.argv[1:],'o:') -lang = args[0] -files = args[1:] - -outdir = 
'.' -for x in optlist: - if x[0] == '-o': - outdir = x[1] - -double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep -_doc = langdefs.translation[lang] - -include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M) -whitespaces = re.compile (r'\s+') -ref_re = re.compile (r'(?ms)@(rglos|ruser|rprogram|ref)(\{)(.*?)(\})') -node_section_re = re.compile (r'@(node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading)( )(.*?)(\n)') -menu_entry_re = re.compile (r'\* (.*?)::') - -def title_gettext (m): - if m.group (2) == '{': - r = whitespaces.sub (' ', m.group (3)) - else: - r = m.group (3) - return '@' + m.group (1) + m.group (2) + _doc (r) + m.group (4) - -def menu_entry_gettext (m): - return '* ' + _doc (m.group (1)) + '::' - -def include_replace (m, filename): - if os.path.exists (os.path.join (os.path.dirname (filename), m.group(1)) + '.texi'): - return '@include ' + m.group(1) + '.pdftexi' - return m.group(0) - -def process_file (filename): - print "Processing %s" % filename - f = open (filename, 'r') - page = f.read () - f.close() - page = node_section_re.sub (title_gettext, page) - page = ref_re.sub (title_gettext, page) - page = menu_entry_re.sub (menu_entry_gettext, page) - page = page.replace ("""-- SKELETON FILE -- -When you actually translate this file, please remove these lines as -well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", '') - page = page.replace ('UNTRANSLATED NODE: IGNORE ME', _doc ("This section has not been translated yet; please refer to the manual in English.")) - includes = include_re.findall (page) - page = include_re.sub (lambda m: include_replace (m, filename), page) - p = os.path.join (outdir, filename) [:-4] + 'pdftexi' - f = open (p, 'w') - f.write (page) - f.close () - dir = os.path.dirname (filename) - for file in includes: - p = os.path.join (dir, file) + '.texi' - if os.path.exists (p): - process_file (p) - -for filename in files: - 
process_file (filename) diff --git a/buildscripts/texi-langutils.py b/buildscripts/texi-langutils.py deleted file mode 100644 index 720b520a2e..0000000000 --- a/buildscripts/texi-langutils.py +++ /dev/null @@ -1,176 +0,0 @@ -#!@PYTHON@ -# texi-langutils.py - -# WARNING: this script can't find files included in a different directory - -import sys -import re -import getopt -import os - -import langdefs - -def read_pipe (command): - print command - pipe = os.popen (command) - output = pipe.read () - if pipe.close (): - print "pipe failed: %(command)s" % locals () - return output - - -optlist, texi_files = getopt.getopt(sys.argv[1:],'no:d:b:i:l:',['skeleton', 'gettext']) -process_includes = not ('-n', '') in optlist # -n don't process @include's in texinfo files - -make_gettext = ('--gettext', '') in optlist # --gettext generate a node list from a Texinfo source -make_skeleton = ('--skeleton', '') in optlist # --skeleton extract the node tree from a Texinfo source - -output_file = 'doc.pot' - -# @untranslated should be defined as a macro in Texinfo source -node_blurb = '''@untranslated -''' -doclang = '' -head_committish = read_pipe ('git-rev-parse HEAD') -intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*- -@c This file is part of %(topfile)s -@ignore - Translation of GIT committish: %(head_committish)s - When revising a translation, copy the HEAD committish of the - version that you are working on. See TRANSLATION for details. 
-@end ignore -''' - -end_blurb = """ -@c -- SKELETON FILE -- -""" - -for x in optlist: - if x[0] == '-o': # -o NAME set PO output file name to NAME - output_file = x[1] - elif x[0] == '-d': # -d DIR set working directory to DIR - os.chdir (x[1]) - elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB - node_blurb = x[1] - elif x[0] == '-i': # -i BLURB set blurb written at beginning of each file to BLURB - intro_blurb = x[1] - elif x[0] == '-l': # -l ISOLANG set documentlanguage to ISOLANG - doclang = '; documentlanguage: ' + x[1] - -texinfo_with_menus_re = re.compile (r"^(\*) +([^:\n]+)::.*?$|^@(include|menu|end menu|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.*?)$|@(rglos){(.+?)}", re.M) - -texinfo_re = re.compile (r"^@(include|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.+?)$|@(rglos){(.+?)}", re.M) - -ly_string_re = re.compile (r'^([a-zA-Z]+)[\t ]*=|%+[\t ]*(.*)$|\\(?:new|context)\s+(?:[a-zA-Z]*?(?:Staff(?:Group)?|Voice|FiguredBass|FretBoards|Names|Devnull))\s+=\s+"?([a-zA-Z]+)"?\s+') -lsr_verbatim_ly_re = re.compile (r'% begin verbatim$') -texinfo_verbatim_ly_re = re.compile (r'^@lilypond\[.*?verbatim') - -def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile, output_file=None, scan_ly=False): - try: - f = open (texifilename, 'r') - texifile = f.read () - f.close () - printedfilename = texifilename.replace ('../','') - includes = [] - - # process ly var names and comments - if output_file and (scan_ly or texifilename.endswith ('.ly')): - lines = texifile.splitlines () - i = 0 - in_verb_ly_block = False - if texifilename.endswith ('.ly'): - verbatim_ly_re = lsr_verbatim_ly_re - else: - verbatim_ly_re = texinfo_verbatim_ly_re - for i in range (len (lines)): - if verbatim_ly_re.search (lines[i]): - in_verb_ly_block = True - elif lines[i].startswith ('@end lilypond'): - 
in_verb_ly_block = False - elif in_verb_ly_block: - for (var, comment, context_id) in ly_string_re.findall (lines[i]): - if var: - output_file.write ('# ' + printedfilename + ':' + \ - str (i + 1) + ' (variable)\n_(r"' + var + '")\n') - elif comment: - output_file.write ('# ' + printedfilename + ':' + \ - str (i + 1) + ' (comment)\n_(r"' + \ - comment.replace ('"', '\\"') + '")\n') - elif context_id: - output_file.write ('# ' + printedfilename + ':' + \ - str (i + 1) + ' (context id)\n_(r"' + \ - context_id + '")\n') - - # process Texinfo node names and section titles - if write_skeleton: - g = open (os.path.basename (texifilename), 'w') - subst = globals () - subst.update (locals ()) - g.write (i_blurb % subst) - tutu = texinfo_with_menus_re.findall (texifile) - node_trigger = False - for item in tutu: - if item[0] == '*': - g.write ('* ' + item[1] + '::\n') - elif output_file and item[4] == 'rglos': - output_file.write ('_(r"' + item[5] + '") # @rglos in ' + printedfilename + '\n') - elif item[2] == 'menu': - g.write ('@menu\n') - elif item[2] == 'end menu': - g.write ('@end menu\n\n') - else: - g.write ('@' + item[2] + ' ' + item[3] + '\n') - if node_trigger: - g.write (n_blurb) - node_trigger = False - elif item[2] == 'include': - includes.append (item[3]) - else: - if output_file: - output_file.write ('# @' + item[2] + ' in ' + \ - printedfilename + '\n_(r"' + item[3].strip () + '")\n') - if item[2] == 'node': - node_trigger = True - g.write (end_blurb) - g.close () - - elif output_file: - toto = texinfo_re.findall (texifile) - for item in toto: - if item[0] == 'include': - includes.append(item[1]) - elif item[2] == 'rglos': - output_file.write ('# @rglos in ' + printedfilename + '\n_(r"' + item[3] + '")\n') - else: - output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip () + '")\n') - - if process_includes: - dir = os.path.dirname (texifilename) - for item in includes: - process_texi (os.path.join (dir, item.strip ()), 
i_blurb, n_blurb, write_skeleton, topfile, output_file, scan_ly) - except IOError, (errno, strerror): - sys.stderr.write ("I/O error(%s): %s: %s\n" % (errno, texifilename, strerror)) - - -if intro_blurb != '': - intro_blurb += '\n\n' -if node_blurb != '': - node_blurb = '\n' + node_blurb + '\n\n' -if make_gettext: - node_list_filename = 'node_list' - node_list = open (node_list_filename, 'w') - node_list.write ('# -*- coding: utf-8 -*-\n') - for texi_file in texi_files: - # Urgly: scan ly comments and variable names only in English doco - is_english_doc = 'Documentation/user' in texi_file - process_texi (texi_file, intro_blurb, node_blurb, make_skeleton, - os.path.basename (texi_file), node_list, - scan_ly=is_english_doc) - for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'): - node_list.write ('_(r"' + word + '")\n') - node_list.close () - os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename) -else: - for texi_file in texi_files: - process_texi (texi_file, intro_blurb, node_blurb, make_skeleton, - os.path.basename (texi_file)) diff --git a/buildscripts/texi-skeleton-update.py b/buildscripts/texi-skeleton-update.py deleted file mode 100644 index f761408274..0000000000 --- a/buildscripts/texi-skeleton-update.py +++ /dev/null @@ -1,25 +0,0 @@ -#!@PYTHON@ -# texi-skeleton-update.py - -import sys -import glob -import os -import shutil - -sys.stderr.write ('texi-skeleton-update.py\n') - -orig_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[1] + '/*.ite??')]) -new_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[2] + '/*.ite??')]) - -for f in new_skeletons: - if f in orig_skeletons: - g = open (os.path.join (sys.argv[1], f), 'r').read () - if '-- SKELETON FILE --' in g: - sys.stderr.write ("Updating %s...\n" % f) - shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1]) - elif f != 'fdl.itexi': - sys.stderr.write ("Copying new file %s...\n" % f) - 
shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1]) - -for f in orig_skeletons.difference (new_skeletons): - sys.stderr.write ("Warning: outdated skeleton file %s\n" % f) diff --git a/buildscripts/texi2omf.py b/buildscripts/texi2omf.py deleted file mode 100644 index cc2603f5e0..0000000000 --- a/buildscripts/texi2omf.py +++ /dev/null @@ -1,154 +0,0 @@ -#!@PYTHON@ - -import getopt -import os -import re -import sys -import time - -def usage (): - sys.stderr.write (''' -texi2omf [options] FILE.texi > FILE.omf - -Options: - ---format=FORM set format FORM (HTML, PS, PDF, [XML]). ---location=FILE file name as installed on disk. ---version=VERSION - -Use the following commands (enclose in @ignore) - -@omfsubject . . -@omfdescription . . -@omftype . . - -etc. - - -''') - -(options, files) = getopt.getopt (sys.argv[1:], '', - ['format=', 'location=', 'version=']) - -license = 'FDL' -location = '' -version = '' -email = os.getenv ('MAILADDRESS') -name = os.getenv ('USERNAME') -format = 'xml' - -for (o, a) in options: - if o == '--format': - format = a - elif o == '--location': - location = 'file:%s' % a - elif o == '--version': - version = a - else: - assert 0 - - -if not files: - usage () - sys.exit (2) - - -formats = { - 'html' : 'text/html', - 'pdf' : 'application/pdf', - 'ps.gz' : 'application/postscript', - 'ps' : 'application/postscript', - 'xml' : 'text/xml', - } - -if not formats.has_key (format): - sys.stderr.write ("Format `%s' unknown\n" % format) - sys.exit (1) - - -infile = files[0] - -today = time.localtime () - -texi = open (infile).read () - -if not location: - location = 'file:/%s' % re.sub (r'\..*', '.' 
+ format, infile) - -omf_vars = { - 'date': '%d-%d-%d' % today[:3], - 'mimeformat': formats[format], - 'maintainer': "%s (%s)" % (name, email), - 'version' : version, - 'location' : location, - 'language' : 'C', - } - -omf_caterories = ['subject', 'creator', 'maintainer', 'contributor', - 'title', 'subtitle', 'version', 'category', 'type', - 'description', 'license', 'language',] - -for a in omf_caterories: - m = re.search ('@omf%s (.*)\n'% a, texi) - if m: - omf_vars[a] = m.group (1) - elif not omf_vars.has_key (a): - omf_vars[a] = '' - -if not omf_vars['title']: - title = '' - m = re.search ('@title (.*)\n', texi) - if m: - title = m.group (1) - - subtitle = '' - m = re.search ('@subtitle (.*)\n', texi) - if m: - subtitle = m.group (1) - - if subtitle: - title = '%s -- %s' % (title, subtitle) - - omf_vars['title'] = title - -if not omf_vars['creator']: - m = re.search ('@author (.*)\n', texi) - if m: - omf_vars['creator'] = m.group (1) - - - -print r''' - - - - - %(creator)s - - - %(maintainer)s - - - %(title)s - - - %(date)s - - - - - %(description)s - - - %(type)s - - - - - - - - -''' % omf_vars - - diff --git a/buildscripts/translations-status.py b/buildscripts/translations-status.py deleted file mode 100644 index c93199354f..0000000000 --- a/buildscripts/translations-status.py +++ /dev/null @@ -1,578 +0,0 @@ -#!/usr/bin/env python - -""" -USAGE: translations-status.py BUILDSCRIPT-DIR LOCALEDIR - - This script must be run from Documentation/ - - Reads template files translations.template.html.in -and for each LANG in LANGUAGES LANG/translations.template.html.in - Writes translations.html.in and for each LANG in LANGUAGES -translations.LANG.html.in - Writes out/translations-status.txt - Updates word counts in TRANSLATION -""" - -import sys -import re -import string -import os - -import langdefs -import buildlib - -def progress (str): - sys.stderr.write (str + '\n') - -progress ("translations-status.py") - -_doc = lambda s: s - -# load gettext messages catalogs 
-translation = langdefs.translation - - -language_re = re.compile (r'^@documentlanguage (.+)', re.M) -comments_re = re.compile (r'^@ignore\n(.|\n)*?\n@end ignore$|@c .*?$', re.M) -space_re = re.compile (r'\s+', re.M) -lilypond_re = re.compile (r'@lilypond({.*?}|(.|\n)*?\n@end lilypond$)', re.M) -node_re = re.compile ('^@node .*?$', re.M) -title_re = re.compile ('^@(top|chapter|(?:sub){0,2}section|' + \ -'(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?) (.*?)$', re.M) -include_re = re.compile ('^@include (.*?)$', re.M) - -translators_re = re.compile (r'^@c\s+Translators\s*:\s*(.*?)$', re.M | re.I) -checkers_re = re.compile (r'^@c\s+Translation\s*checkers\s*:\s*(.*?)$', - re.M | re.I) -status_re = re.compile (r'^@c\s+Translation\s*status\s*:\s*(.*?)$', re.M | re.I) -post_gdp_re = re.compile ('post.GDP', re.I) -untranslated_node_str = '@untranslated' -skeleton_str = '-- SKELETON FILE --' - -section_titles_string = _doc ('Section titles') -last_updated_string = _doc ('

    Last updated %s

    \n') -detailed_status_heads = [_doc ('Translators'), _doc ('Translation checkers'), - _doc ('Translated'), _doc ('Up to date'), - _doc ('Other info')] -format_table = { - 'not translated': {'color':'d0f0f8', 'short':_doc ('no'), 'abbr':'NT', - 'long':_doc ('not translated')}, - 'partially translated': {'color':'dfef77', - 'short':_doc ('partially (%(p)d %%)'), - 'abbr':'%(p)d%%', - 'long':_doc ('partially translated (%(p)d %%)')}, - 'fully translated': {'color':'1fff1f', 'short':_doc ('yes'), 'abbr':'FT', - 'long': _doc ('translated')}, - 'up to date': {'short':_doc ('yes'), 'long':_doc ('up to date'), - 'abbr':'100%%', 'vague':_doc ('up to date')}, - 'outdated': {'short':_doc ('partially'), 'abbr':'%(p)d%%', - 'vague':_doc ('partially up to date')}, - 'N/A': {'short':_doc ('N/A'), 'abbr':'N/A', 'color':'d587ff', 'vague':''}, - 'pre-GDP':_doc ('pre-GDP'), - 'post-GDP':_doc ('post-GDP') -} - -texi_level = { -# (Unumbered/Numbered/Lettered, level) - 'top': ('u', 0), - 'unnumbered': ('u', 1), - 'unnumberedsec': ('u', 2), - 'unnumberedsubsec': ('u', 3), - 'chapter': ('n', 1), - 'section': ('n', 2), - 'subsection': ('n', 3), - 'appendix': ('l', 1) -} - -appendix_number_trans = string.maketrans ('@ABCDEFGHIJKLMNOPQRSTUVWXY', - 'ABCDEFGHIJKLMNOPQRSTUVWXYZ') - -class SectionNumber (object): - def __init__ (self): - self.__data = [[0,'u']] - - def __increase_last_index (self): - type = self.__data[-1][1] - if type == 'l': - self.__data[-1][0] = \ - self.__data[-1][0].translate (appendix_number_trans) - elif type == 'n': - self.__data[-1][0] += 1 - - def format (self): - if self.__data[-1][1] == 'u': - return '' - return '.'.join ([str (i[0]) for i in self.__data if i[1] != 'u']) + ' ' - - def increase (self, (type, level)): - if level == 0: - self.__data = [[0,'u']] - while level + 1 < len (self.__data): - del self.__data[-1] - if level + 1 > len (self.__data): - self.__data.append ([0, type]) - if type == 'l': - self.__data[-1][0] = '@' - if type == self.__data[-1][1]: 
- self.__increase_last_index () - else: - self.__data[-1] = ([0, type]) - if type == 'l': - self.__data[-1][0] = 'A' - elif type == 'n': - self.__data[-1][0] = 1 - return self.format () - - -def percentage_color (percent): - p = percent / 100.0 - if p < 0.33: - c = [hex (int (3 * p * b + (1 - 3 * p) * a))[2:] - for (a, b) in [(0xff, 0xff), (0x5c, 0xa6), (0x5c, 0x4c)]] - elif p < 0.67: - c = [hex (int ((3 * p - 1) * b + (2 - 3 * p) * a))[2:] - for (a, b) in [(0xff, 0xff), (0xa6, 0xff), (0x4c, 0x3d)]] - else: - c = [hex (int ((3 * p - 2) * b + 3 * (1 - p) * a))[2:] - for (a, b) in [(0xff, 0x1f), (0xff, 0xff), (0x3d, 0x1f)]] - return ''.join (c) - - -def update_word_count (text, filename, word_count): - return re.sub (r'(?m)^(\d+) *' + filename, - str (word_count).ljust (6) + filename, - text) - -po_msgid_re = re.compile (r'^msgid "(.*?)"(?:\n"(.*?)")*', re.M) - -def po_word_count (po_content): - s = ' '.join ([''.join (t) for t in po_msgid_re.findall (po_content)]) - return len (space_re.split (s)) - -sgml_tag_re = re.compile (r'<.*?>', re.S) - -def sgml_word_count (sgml_doc): - s = sgml_tag_re.sub ('', sgml_doc) - return len (space_re.split (s)) - -def tely_word_count (tely_doc): - ''' - Calculate word count of a Texinfo document node by node. - - Take string tely_doc as an argument. - Return a list of integers. - - Texinfo comments and @lilypond blocks are not included in word counts. 
- ''' - tely_doc = comments_re.sub ('', tely_doc) - tely_doc = lilypond_re.sub ('', tely_doc) - nodes = node_re.split (tely_doc) - return [len (space_re.split (n)) for n in nodes] - - -class TelyDocument (object): - def __init__ (self, filename): - self.filename = filename - self.contents = open (filename).read () - - ## record title and sectionning level of first Texinfo section - m = title_re.search (self.contents) - if m: - self.title = m.group (2) - self.level = texi_level [m.group (1)] - else: - self.title = 'Untitled' - self.level = ('u', 1) - - m = language_re.search (self.contents) - if m: - self.language = m.group (1) - - included_files = [os.path.join (os.path.dirname (filename), t) - for t in include_re.findall (self.contents)] - self.included_files = [p for p in included_files if os.path.exists (p)] - - def print_title (self, section_number): - return section_number.increase (self.level) + self.title - - -class TranslatedTelyDocument (TelyDocument): - def __init__ (self, filename, masterdocument, parent_translation=None): - TelyDocument.__init__ (self, filename) - - self.masterdocument = masterdocument - if not hasattr (self, 'language') \ - and hasattr (parent_translation, 'language'): - self.language = parent_translation.language - if hasattr (self, 'language'): - self.translation = translation[self.language] - else: - self.translation = lambda x: x - self.title = self.translation (self.title) - - ## record authoring information - m = translators_re.search (self.contents) - if m: - self.translators = [n.strip () for n in m.group (1).split (',')] - else: - self.translators = parent_translation.translators - m = checkers_re.search (self.contents) - if m: - self.checkers = [n.strip () for n in m.group (1).split (',')] - elif isinstance (parent_translation, TranslatedTelyDocument): - self.checkers = parent_translation.checkers - else: - self.checkers = [] - - ## check whether translation is pre- or post-GDP - m = status_re.search (self.contents) - if m: - 
self.post_gdp = bool (post_gdp_re.search (m.group (1))) - else: - self.post_gdp = False - - ## record which parts (nodes) of the file are actually translated - self.partially_translated = not skeleton_str in self.contents - nodes = node_re.split (self.contents) - self.translated_nodes = [not untranslated_node_str in n for n in nodes] - - ## calculate translation percentage - master_total_word_count = sum (masterdocument.word_count) - translation_word_count = \ - sum ([masterdocument.word_count[k] * self.translated_nodes[k] - for k in range (min (len (masterdocument.word_count), - len (self.translated_nodes)))]) - self.translation_percentage = \ - 100 * translation_word_count / master_total_word_count - - ## calculate how much the file is outdated - (diff_string, error) = \ - buildlib.check_translated_doc (masterdocument.filename, self.filename, self.contents) - if error: - sys.stderr.write ('warning: %s: %s' % (self.filename, error)) - self.uptodate_percentage = None - else: - diff = diff_string.splitlines () - insertions = sum ([len (l) - 1 for l in diff - if l.startswith ('+') - and not l.startswith ('+++')]) - deletions = sum ([len (l) - 1 for l in diff - if l.startswith ('-') - and not l.startswith ('---')]) - outdateness_percentage = 50.0 * (deletions + insertions) / \ - (masterdocument.size + 0.5 * (deletions - insertions)) - self.uptodate_percentage = 100 - int (outdateness_percentage) - if self.uptodate_percentage > 100: - alternative = 50 - progress ("%s: strange uptodateness percentage %d %%, \ -setting to %d %%" % (self.filename, self.uptodate_percentage, alternative)) - self.uptodate_percentage = alternative - elif self.uptodate_percentage < 1: - alternative = 1 - progress ("%s: strange uptodateness percentage %d %%, \ -setting to %d %%" % (self.filename, self.uptodate_percentage, alternative)) - self.uptodate_percentage = alternative - - def completeness (self, formats=['long'], translated=False): - if translated: - translation = self.translation - 
else: - translation = lambda x: x - - if isinstance (formats, str): - formats = [formats] - p = self.translation_percentage - if p == 0: - status = 'not translated' - elif p == 100: - status = 'fully translated' - else: - status = 'partially translated' - return dict ([(f, translation (format_table[status][f]) % locals()) - for f in formats]) - - def uptodateness (self, formats=['long'], translated=False): - if translated: - translation = self.translation - else: - translation = lambda x: x - - if isinstance (formats, str): - formats = [formats] - p = self.uptodate_percentage - if p == None: - status = 'N/A' - elif p == 100: - status = 'up to date' - else: - status = 'outdated' - l = {} - for f in formats: - if f == 'color' and p != None: - l['color'] = percentage_color (p) - else: - l[f] = translation (format_table[status][f]) % locals () - return l - - def gdp_status (self): - if self.post_gdp: - return self.translation (format_table['post-GDP']) - else: - return self.translation (format_table['pre-GDP']) - - def short_html_status (self): - s = ' ' - if self.partially_translated: - s += '
    \n '.join (self.translators) + '
    \n' - if self.checkers: - s += ' ' + \ - '
    \n '.join (self.checkers) + '

    \n' - - c = self.completeness (['color', 'long']) - s += ' \ -%(long)s
    \n' % c - - if self.partially_translated: - u = self.uptodateness (['vague', 'color']) - s += ' \ -%(vague)s
    \n' % u - - s += ' \n' - return s - - def text_status (self): - s = self.completeness ('abbr')['abbr'] + ' ' - - if self.partially_translated: - s += self.uptodateness ('abbr')['abbr'] + ' ' - return s - - def html_status (self, numbering=SectionNumber ()): - if self.title == 'Untitled': - return '' - - if self.level[1] == 0: # if self is a master document - s = ''' - - ''' % self.print_title (numbering) - s += ''.join ([' \n' % self.translation (h) - for h in detailed_status_heads]) - s += ' \n' - s += ' \n \n' \ - % (self.translation (section_titles_string), - sum (self.masterdocument.word_count)) - - else: - s = ' \n \n' \ - % (self.print_title (numbering), - sum (self.masterdocument.word_count)) - - if self.partially_translated: - s += ' \n' - s += ' \n' - else: - s += ' \n' * 2 - - c = self.completeness (['color', 'short'], translated=True) - s += ' \n' % {'color': c['color'], - 'short': c['short']} - - if self.partially_translated: - u = self.uptodateness (['short', 'color'], translated=True) - s += ' \n' % {'color': u['color'], - 'short': u['short']} - else: - s += ' \n' - - s += ' \n \n' - s += ''.join ([i.translations[self.language].html_status (numbering) - for i in self.masterdocument.includes - if self.language in i.translations]) - - if self.level[1] == 0: # if self is a master document - s += '
    %s%s
    %s
    (%d)
    %s
    (%d)
    ' + '
    \n '.join (self.translators) + '
    ' + '
    \n '.join (self.checkers) + '
    \ -%(short)s\ -%(short)s' + self.gdp_status () + '
    \n

    \n' - return s - -class MasterTelyDocument (TelyDocument): - def __init__ (self, - filename, - parent_translations=dict ([(lang, None) - for lang in langdefs.LANGDICT])): - TelyDocument.__init__ (self, filename) - self.size = len (self.contents) - self.word_count = tely_word_count (self.contents) - translations = dict ([(lang, os.path.join (lang, filename)) - for lang in langdefs.LANGDICT]) - self.translations = \ - dict ([(lang, - TranslatedTelyDocument (translations[lang], - self, parent_translations.get (lang))) - for lang in langdefs.LANGDICT - if os.path.exists (translations[lang])]) - if self.translations: - self.includes = [MasterTelyDocument (f, self.translations) - for f in self.included_files] - else: - self.includes = [] - - def update_word_counts (self, s): - s = update_word_count (s, self.filename, sum (self.word_count)) - for i in self.includes: - s = i.update_word_counts (s) - return s - - def html_status (self, numbering=SectionNumber ()): - if self.title == 'Untitled' or not self.translations: - return '' - if self.level[1] == 0: # if self is a master document - s = ''' - - ''' % self.print_title (numbering) - s += ''.join ([' \n' % l for l in self.translations]) - s += ' \n' - s += ' \n \n' \ - % sum (self.word_count) - - else: # if self is an included file - s = ' \n \n' \ - % (self.print_title (numbering), sum (self.word_count)) - - s += ''.join ([t.short_html_status () - for t in self.translations.values ()]) - s += ' \n' - s += ''.join ([i.html_status (numbering) for i in self.includes]) - - if self.level[1] == 0: # if self is a master document - s += '
    %s%s
    Section titles
    (%d)
    %s
    (%d)
    \n

    \n' - return s - - def text_status (self, numbering=SectionNumber (), colspec=[48,12]): - if self.title == 'Untitled' or not self.translations: - return '' - - s = '' - if self.level[1] == 0: # if self is a master document - s += (self.print_title (numbering) + ' ').ljust (colspec[0]) - s += ''.join (['%s'.ljust (colspec[1]) % l - for l in self.translations]) - s += '\n' - s += ('Section titles (%d)' % \ - sum (self.word_count)).ljust (colspec[0]) - - else: - s = '%s (%d) ' \ - % (self.print_title (numbering), sum (self.word_count)) - s = s.ljust (colspec[0]) - - s += ''.join ([t.text_status ().ljust(colspec[1]) - for t in self.translations.values ()]) - s += '\n\n' - s += ''.join ([i.text_status (numbering) for i in self.includes]) - - if self.level[1] == 0: - s += '\n' - return s - - -update_category_word_counts_re = re.compile (r'(?ms)^-(\d+)-(.*?\n)\d+ *total') - -counts_re = re.compile (r'(?m)^(\d+) ') - -def update_category_word_counts_sub (m): - return '-' + m.group (1) + '-' + m.group (2) + \ - str (sum ([int (c) - for c in counts_re.findall (m.group (2))])).ljust (6) + \ - 'total' - - -progress ("Reading documents...") - -tely_files = \ - buildlib.read_pipe ("find -maxdepth 2 -name '*.tely'")[0].splitlines () -tely_files.sort () -master_docs = [MasterTelyDocument (os.path.normpath (filename)) - for filename in tely_files] -master_docs = [doc for doc in master_docs if doc.translations] - -main_status_page = open ('translations.template.html.in').read () - -enabled_languages = [l for l in langdefs.LANGDICT - if langdefs.LANGDICT[l].enabled - and l != 'en'] -lang_status_pages = \ - dict ([(l, open (os.path.join (l, 'translations.template.html.in')). 
read ()) - for l in enabled_languages]) - -progress ("Generating status pages...") - -date_time = buildlib.read_pipe ('LANG= date -u')[0] - -main_status_html = last_updated_string % date_time -main_status_html += '\n'.join ([doc.html_status () for doc in master_docs]) - -html_re = re.compile ('', re.I) -end_body_re = re.compile ('', re.I) - -html_header = ''' -''' - -main_status_page = html_re.sub (html_header, main_status_page) - -main_status_page = end_body_re.sub (main_status_html + '\n', - main_status_page) - -open ('translations.html.in', 'w').write (main_status_page) - -for l in enabled_languages: - date_time = buildlib.read_pipe ('LANG=%s date -u' % l)[0] - lang_status_pages[l] = translation[l] (last_updated_string) % date_time + lang_status_pages[l] - lang_status_page = html_re.sub (html_header, lang_status_pages[l]) - html_status = '\n'.join ([doc.translations[l].html_status () - for doc in master_docs - if l in doc.translations]) - lang_status_page = end_body_re.sub (html_status + '\n', - lang_status_page) - open (os.path.join (l, 'translations.html.in'), 'w').write (lang_status_page) - -main_status_txt = '''Documentation translations status -Generated %s -NT = not translated -FT = fully translated - -''' % date_time - -main_status_txt += '\n'.join ([doc.text_status () for doc in master_docs]) - -status_txt_file = 'out/translations-status.txt' -progress ("Writing %s..." % status_txt_file) -open (status_txt_file, 'w').write (main_status_txt) - -translation_instructions_file = 'TRANSLATION' -progress ("Updating %s..." 
% translation_instructions_file) -translation_instructions = open (translation_instructions_file).read () - -for doc in master_docs: - translation_instructions = doc.update_word_counts (translation_instructions) - -for html_file in re.findall (r'(?m)^\d+ *(\S+?\.html\S*?)(?: |$)', - translation_instructions): - word_count = sgml_word_count (open (html_file).read ()) - translation_instructions = update_word_count (translation_instructions, - html_file, - word_count) - -for po_file in re.findall (r'(?m)^\d+ *(\S+?\.po\S*?)(?: |$)', - translation_instructions): - word_count = po_word_count (open (po_file).read ()) - translation_instructions = update_word_count (translation_instructions, - po_file, - word_count) - -translation_instructions = \ - update_category_word_counts_re.sub (update_category_word_counts_sub, - translation_instructions) - -open (translation_instructions_file, 'w').write (translation_instructions) diff --git a/buildscripts/update-snippets.py b/buildscripts/update-snippets.py deleted file mode 100644 index 6b70c79467..0000000000 --- a/buildscripts/update-snippets.py +++ /dev/null @@ -1,102 +0,0 @@ -#!@PYTHON@ -# update-snippets.py - -# USAGE: update-snippets.py REFERENCE-DIR TARGET-DIR FILES -# -# update ly snippets in TARGET-DIR/FILES with snippets from REFERENCE-DIR/FILES -# -# More precisely, each existing FILE in TARGET-DIR is matched to the FILE in -# REFERENCE-DIR (it the latter does not exist, a warning is given). -# -# Shell wildcards expansion is performed on FILES. -# This script currently supports Texinfo format. -# Ly snippets preceded with a line containing '@c KEEP LY' in TARGET-DIR/FILES -# will not be updated. -# An error occurs if REFERENCE-DIR/FILE and TARGET-DIR/FILE do not have the -# same snippets count. - -import sys -import os -import glob -import re - -print "update-snippets.py" - -comment_re = re.compile (r'(? 
0 and (not target_source[j-1].startswith ('@c KEEP LY')) and target_source[j] != ref_source[k]: - target_source[j] = ref_source[k] - c += 1 - changed_snippets_count += 1 - f = open (file, 'w') - f.write (''.join (target_source)) - sys.stderr.write ('%s: %d/%d snippets updated\n' % (file, c, snippet_count)) - -sys.stderr.write ('\nTotal: %d snippets, %d updated snippets.\n' % (total_snippet_count, changed_snippets_count)) -sys.exit (exit_code) diff --git a/buildscripts/www_post.py b/buildscripts/www_post.py deleted file mode 100644 index 1af5c922ec..0000000000 --- a/buildscripts/www_post.py +++ /dev/null @@ -1,100 +0,0 @@ -#!@PYTHON@ - -## This is www_post.py. This script is the main stage -## of toplevel GNUmakefile local-WWW-post target. - -# USAGE: www_post PACKAGE_NAME TOPLEVEL_VERSION OUTDIR TARGETS -# please call me from top of the source directory - -import sys -import os -import re - -import langdefs - -import mirrortree -import postprocess_html - -package_name, package_version, outdir, targets = sys.argv[1:] -targets = targets.split (' ') -outdir = os.path.normpath (outdir) -doc_dirs = ['input', 'Documentation', outdir] -target_pattern = os.path.join (outdir, '%s-root') - -# these redirection pages allow to go back to the documentation index -# from HTML manuals/snippets page -static_files = { - os.path.join (outdir, 'index.html'): - ''' -Redirecting to the documentation index...\n''', - os.path.join (outdir, 'VERSION'): - package_version + '\n', - os.path.join ('input', 'lsr', outdir, 'index.html'): - ''' -Redirecting to the documentation index...\n''' - } - -for l in langdefs.LANGUAGES: - static_files[os.path.join ('Documentation', 'user', outdir, l.file_name ('index', '.html'))] = \ - '\nRedirecting to the documentation index...\n' - -for f, contents in static_files.items (): - open (f, 'w').write (contents) - -sys.stderr.write ("Mirrorring...\n") -dirs, symlinks, files = mirrortree.walk_tree ( - tree_roots = doc_dirs, - process_dirs = outdir, - 
exclude_dirs = '(^|/)(' + r'|po|out|out-test|.*?[.]t2d|\w*?-root)(/|$)|Documentation/(' + '|'.join ([l.code for l in langdefs.LANGUAGES]) + ')', - find_files = r'.*?\.(?:midi|html|pdf|png|txt|i?ly|signature|css|zip|xml|mxl)$|VERSION', - exclude_files = r'lily-[0-9a-f]+.*\.(pdf|txt)') - -# actual mirrorring stuff -html_files = [] -hardlinked_files = [] -for f in files: - if f.endswith ('.html'): - html_files.append (f) - else: - hardlinked_files.append (f) -dirs = [re.sub ('/' + outdir, '', d) for d in dirs] -while outdir in dirs: - dirs.remove (outdir) -dirs = list (set (dirs)) -dirs.sort () - -strip_file_name = {} -strip_re = re.compile (outdir + '/') -for t in targets: - out_root = target_pattern % t - strip_file_name[t] = lambda s: os.path.join (target_pattern % t, (strip_re.sub ('', s))) - os.mkdir (out_root) - map (os.mkdir, [os.path.join (out_root, d) for d in dirs]) - for f in hardlinked_files: - os.link (f, strip_file_name[t] (f)) - for l in symlinks: - p = mirrortree.new_link_path (os.path.normpath (os.readlink (l)), os.path.dirname (l), strip_re) - dest = strip_file_name[t] (l) - if not os.path.exists (dest): - os.symlink (p, dest) - - ## ad-hoc renaming to make xrefs between PDFs work - os.rename (os.path.join (out_root, 'input/lsr/lilypond-snippets.pdf'), - os.path.join (out_root, 'Documentation/user/lilypond-snippets.pdf')) - -# need this for content negotiation with documentation index -if 'online' in targets: - f = open (os.path.join (target_pattern % 'online', 'Documentation/.htaccess'), 'w') - f.write ('#.htaccess\nDirectoryIndex index\n') - f.close () - -postprocess_html.build_pages_dict (html_files) -for t in targets: - sys.stderr.write ("Processing HTML pages for %s target...\n" % t) - postprocess_html.process_html_files ( - package_name = package_name, - package_version = package_version, - target = t, - name_filter = strip_file_name[t]) - diff --git a/configure.in b/configure.in index f9f5c622ea..3651171c46 100644 --- a/configure.in +++ 
b/configure.in @@ -76,7 +76,7 @@ else NCSB_FILE=`$FCMATCH --verbose "Century Schoolbook L:style=$style" | grep 'file:' | grep -v "\.ttf"` NCSB_FILE=`echo $NCSB_FILE | sed 's/^.*"\(.*\)".*$/\1/g'` - NCSB_FILE=`$PYTHON "$srcdir/buildscripts/readlink.py" $NCSB_FILE` + NCSB_FILE=`$PYTHON "$srcdir/scripts/aux/readlink.py" $NCSB_FILE` NCSB_SOURCE_FILES="$NCSB_FILE $NCSB_SOURCE_FILES" done else diff --git a/elisp/GNUmakefile b/elisp/GNUmakefile index 0eab184cd0..9dcdf1ce6e 100644 --- a/elisp/GNUmakefile +++ b/elisp/GNUmakefile @@ -16,11 +16,14 @@ include $(depth)/make/stepmake.make LILYPOND_WORDS = $(outdir)/lilypond-words.el LILYPOND_WORDS_DEPENDS =\ $(top-src-dir)/lily/lily-lexer.cc \ - $(buildscript-dir)/lilypond-words.py \ + $(buildscript-dir)/lilypond-words \ $(top-src-dir)/scm/markup.scm \ $(top-src-dir)/ly/engraver-init.ly +$(buildscript-dir)/lilypond-words: + make -C $(depth)/scripts/build + $(LILYPOND_WORDS): - cd $(top-src-dir) && $(PYTHON) buildscripts/lilypond-words.py --el --dir=$(top-build-dir)/elisp/$(outconfbase) + cd $(top-src-dir) && $(buildscript-dir)/lilypond-words --el --dir=$(top-build-dir)/elisp/$(outconfbase) all: $(LILYPOND_WORDS) diff --git a/elisp/SConscript b/elisp/SConscript deleted file mode 100644 index 380490c08b..0000000000 --- a/elisp/SConscript +++ /dev/null @@ -1,15 +0,0 @@ -# -*-python-*- - -Import ('env', 'install', 'src_glob') -sources = src_glob ('*.el') + ['lilypond-words.el'] - -e = env.Copy () -a = '$PYTHON $srcdir/buildscripts/lilypond-words.py --el --dir=${TARGET.dir}' -e.Command ('lilypond-words.el', - ['#/lily/lily-lexer.cc', - '#/buildscripts/lilypond-words.py', - '#/scm/markup.scm', - '#/ly/engraver-init.ly',], - a) - -install (sources, env['sharedir_package_version'] + '/elisp') diff --git a/flower/SConscript b/flower/SConscript deleted file mode 100644 index c7e0abd4a0..0000000000 --- a/flower/SConscript +++ /dev/null @@ -1,28 +0,0 @@ -# -*-python-*- - -name = 'flower' -outdir = Dir ('.').path - -Import ('env', 
'src_glob') -sources = src_glob ('*.cc') - -e = env.Copy () -e.Append (CPPPATH = ['#/flower/include', outdir,]) -includes = src_glob ('include/*.hh') - -if 1: # ut - def test_source (x): - x.startswith ('test') - test_sources = filter (lambda x: x.startswith ('test'), sources) - sources = filter (lambda x: not x.startswith ('test'), sources) - ee = e.Copy () - ee.Append (LIBS = [name, 'boost_unit_test_framework']) - test = ee.Program ('test' + name, test_sources) - -if env['static']: - e.Library (name, sources) -if not env['static'] or env['shared']: - e.SharedLibrary (name, sources) - -po = env.Command ('lilypond.po', sources + includes, env['pocommand']) -env.Alias ('po-update', po) diff --git a/input/SConscript b/input/SConscript deleted file mode 100644 index 361c9027a6..0000000000 --- a/input/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'mutopia') -mutopia () diff --git a/input/lsr/GNUmakefile b/input/lsr/GNUmakefile index f91dacd38d..e479c09c0e 100644 --- a/input/lsr/GNUmakefile +++ b/input/lsr/GNUmakefile @@ -22,7 +22,7 @@ IN_ITELY_FILES = $(call src-wildcard,*-intro.itely) GENERATED_ITELY_FILES = $(IN_ITELY_FILES:%-intro.itely=$(outdir)/%.itely) $(outdir)/%.itely: %-intro.itely %.snippet-list - xargs $(PYTHON) $(buildscript-dir)/lys-to-tely.py -f doctitle,texidoc,verbatim --name=$@ --template=$< < $(filter %.snippet-list, $^) + xargs $(LYS_TO_TELY) -f doctitle,texidoc,verbatim --name=$@ --template=$< < $(filter %.snippet-list, $^) $(outdir)/lilypond-snippets.texi: $(GENERATED_ITELY_FILES) $(LY_FILES) diff --git a/input/lsr/README b/input/lsr/README index 1906148776..21bb3d6c2f 100644 --- a/input/lsr/README +++ b/input/lsr/README @@ -3,7 +3,7 @@ http://lsr.dsi.unimi.it/ To update this directory, do at top of the source tree -buildscripts/makelsr.py DIR +scripts/aux/makelsr.py DIR where DIR is the directory unpacked from lsr-snippets-doc-DATE tarball available on http://lsr.dsi.unimi.it/download. 
diff --git a/input/lsr/SConscript b/input/lsr/SConscript deleted file mode 100644 index c534e3573e..0000000000 --- a/input/lsr/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'collate') -collate (title = 'LilyPond Examples') diff --git a/input/lsr/avoiding-collisions-of-chord-fingering-with-beams.ly b/input/lsr/avoiding-collisions-of-chord-fingering-with-beams.ly index d91981e109..bdae18ce89 100644 --- a/input/lsr/avoiding-collisions-of-chord-fingering-with-beams.ly +++ b/input/lsr/avoiding-collisions-of-chord-fingering-with-beams.ly @@ -25,6 +25,14 @@ overridden: " doctitle = "Avoiding collisions of chord fingering with beams" + texidocfr = " +Les doigtés et les numéros de cordes attachés à des notes seules +évitent automatiquement les barres de ligature, mais ce n'est pas le +cas par défaut pour les doigtés ou numéros de cordes attachés aux +notes d'un accord. L'exemple qui suit montre comment ce comportement +par défaut peut être corrigé. +" + doctitlefr = "Éviter les collisions entre les doigtés d'accords et les ligatures" } % begin verbatim \relative c' { diff --git a/input/manual/SConscript b/input/manual/SConscript deleted file mode 100644 index ece0a25f0c..0000000000 --- a/input/manual/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'collate') -collate (title = 'LilyPond Examples from the Manual') diff --git a/input/mutopia/E.Satie/SConscript b/input/mutopia/E.Satie/SConscript deleted file mode 100644 index 361c9027a6..0000000000 --- a/input/mutopia/E.Satie/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'mutopia') -mutopia () diff --git a/input/mutopia/F.Schubert/SConscript b/input/mutopia/F.Schubert/SConscript deleted file mode 100644 index 361c9027a6..0000000000 --- a/input/mutopia/F.Schubert/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'mutopia') -mutopia () diff --git a/input/mutopia/J.S.Bach/SConscript b/input/mutopia/J.S.Bach/SConscript 
deleted file mode 100644 index 361c9027a6..0000000000 --- a/input/mutopia/J.S.Bach/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'mutopia') -mutopia () diff --git a/input/mutopia/R.Schumann/SConscript b/input/mutopia/R.Schumann/SConscript deleted file mode 100644 index 361c9027a6..0000000000 --- a/input/mutopia/R.Schumann/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'mutopia') -mutopia () diff --git a/input/mutopia/SConscript b/input/mutopia/SConscript deleted file mode 100644 index 361c9027a6..0000000000 --- a/input/mutopia/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'mutopia') -mutopia () diff --git a/input/mutopia/W.A.Mozart/SConscript b/input/mutopia/W.A.Mozart/SConscript deleted file mode 100644 index 361c9027a6..0000000000 --- a/input/mutopia/W.A.Mozart/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'mutopia') -mutopia () diff --git a/input/new/SConscript b/input/new/SConscript deleted file mode 100644 index f72bfffc28..0000000000 --- a/input/new/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'collate') -collate (title = 'Advanced snippets') diff --git a/input/regression/SConscript b/input/regression/SConscript deleted file mode 100644 index 708fea8782..0000000000 --- a/input/regression/SConscript +++ /dev/null @@ -1,4 +0,0 @@ -# -*-python-*- - -Import ('env', 'collate') -collate (title = 'LilyPond Regression Tests') diff --git a/input/texidocs/avoiding-collisions-of-chord-fingering-with-beams.texidoc b/input/texidocs/avoiding-collisions-of-chord-fingering-with-beams.texidoc index 77da596cff..89b0d4ba63 100644 --- a/input/texidocs/avoiding-collisions-of-chord-fingering-with-beams.texidoc +++ b/input/texidocs/avoiding-collisions-of-chord-fingering-with-beams.texidoc @@ -8,3 +8,13 @@ predeterminado: " doctitlees = "Evitar colisiones entre digitaciones de acordes y barras de corchea" + +%% Translation of GIT 
committish: 98dc713cb34b498f145badf23d14957367a19ece + texidocfr = " +Les doigtés et les numéros de cordes attachés à des notes seules +évitent automatiquement les barres de ligature, mais ce n'est pas le cas par +défaut pour les doigtés ou numéros de cordes attachés aux notes d'un +accord. L'exemple qui suit montre comment ce comportement par défaut +peut être corrigé. +" + doctitlefr = "Éviter les collisions entre les doigtés d'accords et les ligatures" diff --git a/lily/SConscript b/lily/SConscript deleted file mode 100644 index ebd623fee9..0000000000 --- a/lily/SConscript +++ /dev/null @@ -1,29 +0,0 @@ -# -*-python-*- - -Import ('env', 'src_glob', 'install') - -outdir = Dir ('.').abspath - -cc_sources = src_glob ('*.cc') -sources = cc_sources + ['parser.yy', 'lexer.ll'] -includes = src_glob ('include/*.hh') - -e = env.Copy () - -e.Append ( - CPPPATH = [ - '#/lily/include', - '#/flower/include', - outdir], - LEXFLAGS = ['-Cfe', '-p', '-p'], - LIBS = ['flower'], - ) - -e.HH ('parser.hh', 'parser.yy') -e.ParseConfig ('guile-config link') -lily = e.Program ('lilypond', sources) -install (lily, env['bindir']) - -# let's not, for now -#po = env.Command ('lilypond.po', cc_sources + includes, env['pocommand']) -#env.Alias ('po-update', po) diff --git a/ly/SConscript b/ly/SConscript deleted file mode 100644 index a615862404..0000000000 --- a/ly/SConscript +++ /dev/null @@ -1,5 +0,0 @@ -# -*-python-*- - -Import ('env', 'install', 'src_glob') -sources = src_glob ('*.ly') -install (sources, env['sharedir_package_version'] + '/ly') diff --git a/make/doc-i18n-root-targets.make b/make/doc-i18n-root-targets.make index 8afe50af84..3b97883dfd 100644 --- a/make/doc-i18n-root-targets.make +++ b/make/doc-i18n-root-targets.make @@ -1,4 +1,4 @@ default: local-WWW-2: $(OUT_HTML_FILES) - $(PYTHON) $(buildscript-dir)/mass-link.py --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/$(outdir) $(HTML_FILES) + $(buildscript-dir)/mass-link --prepend-suffix .$(ISOLANG) 
hard $(outdir) $(top-build-dir)/Documentation/$(outdir) $(HTML_FILES) diff --git a/make/doc-i18n-user-rules.make b/make/doc-i18n-user-rules.make index 66a97cc228..59bfe8b6f1 100644 --- a/make/doc-i18n-user-rules.make +++ b/make/doc-i18n-user-rules.make @@ -17,7 +17,7 @@ $(outdir)/%-big-page.html: $(outdir)/%.texi endif $(outdir)/%.pdftexi: $(outdir)/%.texi - $(PYTHON) $(buildscript-dir)/texi-gettext.py $(ISOLANG) $< + $(buildscript-dir)/texi-gettext $(ISOLANG) $< $(outdir)/%.pdf: $(outdir)/%.pdftexi cd $(outdir); texi2pdf $(TEXI2PDF_FLAGS) $(TEXINFO_PAPERSIZE_OPTION) $(notdir $*).pdftexi @@ -31,7 +31,7 @@ $(outdir)/%.png: $(top-build-dir)/Documentation/user/$(outdir)/%.png ln -f $< $@ $(XREF_MAPS_DIR)/%.$(ISOLANG).xref-map: $(outdir)/%.texi - $(PYTHON) $(buildscript-dir)/extract_texi_filenames.py -o $(XREF_MAPS_DIR) $< + $(buildscript-dir)/extract_texi_filenames -o $(XREF_MAPS_DIR) $< $(MASTER_TEXI_FILES): $(ITELY_FILES) $(ITEXI_FILES) diff --git a/make/doc-i18n-user-targets.make b/make/doc-i18n-user-targets.make index 44c9a1a3d6..4bb7c5b9c9 100644 --- a/make/doc-i18n-user-targets.make +++ b/make/doc-i18n-user-targets.make @@ -3,9 +3,9 @@ default: local-WWW-1: $(MASTER_TEXI_FILES) $(PDF_FILES) $(XREF_MAPS_FILES) local-WWW-2: $(DEEP_HTML_FILES) $(BIG_PAGE_HTML_FILES) $(DOCUMENTATION_LOCALE_TARGET) - find $(outdir) -name '*.html' | xargs grep -L 'UNTRANSLATED NODE: IGNORE ME' | xargs $(PYTHON) $(buildscript-dir)/html-gettext.py $(ISOLANG) - find $(outdir) -name '*.html' | xargs grep -L --label="" 'UNTRANSLATED NODE: IGNORE ME' | sed 's!$(outdir)/!!g' | xargs $(PYTHON) $(buildscript-dir)/mass-link.py --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir) $(TELY_FILES:%.tely=%.pdf) - find $(outdir) \( -name 'lily-*.png' -o -name 'lily-*.ly' \) | sed 's!$(outdir)/!!g' | xargs $(PYTHON) $(buildscript-dir)/mass-link.py hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir) + find $(outdir) -name '*.html' | xargs grep -L 
'UNTRANSLATED NODE: IGNORE ME' | xargs $(buildscript-dir)/html-gettext $(ISOLANG) + find $(outdir) -name '*.html' | xargs grep -L --label="" 'UNTRANSLATED NODE: IGNORE ME' | sed 's!$(outdir)/!!g' | xargs $(buildscript-dir)/mass-link --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir) $(TELY_FILES:%.tely=%.pdf) + find $(outdir) \( -name 'lily-*.png' -o -name 'lily-*.ly' \) | sed 's!$(outdir)/!!g' | xargs $(buildscript-dir)/mass-link hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir) $(DOCUMENTATION_LOCALE_TARGET): $(MAKE) -C $(depth)/Documentation/po out=www messages diff --git a/make/generic-vars.make b/make/generic-vars.make index 428e5b900a..ffe17a38fc 100644 --- a/make/generic-vars.make +++ b/make/generic-vars.make @@ -8,7 +8,9 @@ # you do make dist # -buildscript-dir = $(src-depth)/buildscripts +buildscript-dir = $(top-build-dir)/scripts/build/$(outconfbase) +auxpython-dir = $(src-depth)/python/aux +auxscript-dir = $(src-depth)/scripts/aux script-dir = $(src-depth)/scripts input-dir = $(src-depth)/input @@ -18,7 +20,7 @@ mi2mu-dir = $(src-depth)/mi2mu make-dir = $(src-depth)/make include-flower = $(src-depth)/flower/include -export PYTHONPATH:=$(buildscript-dir):$(PYTHONPATH) +export PYTHONPATH:=$(auxpython-dir):$(PYTHONPATH) LILYPOND_INCLUDES = $(include-flower) $(depth)/flower/$(outdir) diff --git a/make/lilypond-vars.make b/make/lilypond-vars.make index 10b82d8d9f..4faeb8fcd5 100644 --- a/make/lilypond-vars.make +++ b/make/lilypond-vars.make @@ -3,7 +3,7 @@ ifeq ($(LILYPOND_EXTERNAL_BINARY),) # environment settings. 
-export PATH:=$(top-build-dir)/lily/$(outconfbase):$(top-build-dir)/buildscripts/$(outconfbase):$(top-build-dir)/scripts/$(outconfbase):$(PATH): +export PATH:=$(top-build-dir)/lily/$(outconfbase):$(buildscript-dir):$(top-build-dir)/scripts/$(outconfbase):$(PATH): export LILYPOND_BINARY=$(top-build-dir)/$(outconfbase)/bin/lilypond else @@ -77,5 +77,5 @@ export LYDOC_LOCALEDIR:= $(top-build-dir)/Documentation/po/out-www #texi-html for www only: LILYPOND_BOOK_FORMAT=$(if $(subst out-www,,$(notdir $(outdir))),texi,texi-html) LY2DVI = $(LILYPOND_BINARY) -LYS_TO_TELY = $(buildscript-dir)/lys-to-tely.py +LYS_TO_TELY = $(buildscript-dir)/lys-to-tely diff --git a/make/lysdoc-rules.make b/make/lysdoc-rules.make index 6dfa8e6f0b..6745887514 100644 --- a/make/lysdoc-rules.make +++ b/make/lysdoc-rules.make @@ -1,3 +1,3 @@ $(outdir)/collated-files.tely: $(COLLATED_FILES) - $(PYTHON) $(LYS_TO_TELY) --name=$(outdir)/collated-files.tely --title="$(TITLE)" $^ + $(LYS_TO_TELY) --name=$(outdir)/collated-files.tely --title="$(TITLE)" $^ diff --git a/mf/GNUmakefile b/mf/GNUmakefile index 898a4c245b..389f7b9baf 100644 --- a/mf/GNUmakefile +++ b/mf/GNUmakefile @@ -120,8 +120,8 @@ $(outdir)/aybabtu.fontname: $(outdir)/aybabtu.subfonts: echo $(subst .mf,,$(call src-wildcard,feta-braces-[a-z].mf)) > $@ -$(PE_SCRIPTS): $(top-build-dir)/buildscripts/$(outdir)/gen-emmentaler-scripts - $(PYTHON) $< --dir=$(outdir) +$(PE_SCRIPTS): $(buildscript-dir)/gen-emmentaler-scripts + $< --dir=$(outdir) ALL_FONTS = $(FETA_FONTS) PFB_FILES = $(ALL_FONTS:%=$(outdir)/%.pfb) @@ -174,12 +174,12 @@ $(outdir)/%.lisp \ $(outdir)/%.otf-gtable \ $(outdir)/%.enc \ $(outdir)/%.pe: $(outdir)/%.log - $(PYTHON) $(buildscript-dir)/mf-to-table.py \ - --global-lisp=$(outdir)/$('$(shell cd $(outdir); pwd)'' > $@ $(NCSB_OTFS): $(NCSB_SOURCE_FILES) \ - $(buildscript-dir)/pfx2ttf.fontforge + $(auxscript-dir)/pfx2ttf.fontforge $(foreach i, $(basename $(NCSB_SOURCE_FILES)), \ - $(FONTFORGE) -script 
$(buildscript-dir)/pfx2ttf.fontforge \ + $(FONTFORGE) -script $(auxscript-dir)/pfx2ttf.fontforge \ $(i).pfb $(i).afm $(outdir)/ && ) true # eof diff --git a/mf/SConscript b/mf/SConscript deleted file mode 100644 index 1a5f281d93..0000000000 --- a/mf/SConscript +++ /dev/null @@ -1,108 +0,0 @@ -# -*-python-*- - -import os -import re -import string - -Import ('env', 'base_glob', 'install') -feta = reduce (lambda x, y: x + y, - map (lambda x: base_glob (x), - ('feta[0-9]*.mf', - 'feta-alphabet*[0-9].mf', - 'feta-braces-[a-z]*.mf', - 'parmesan[0-9]*.mf',))) -feta = base_glob ('feta[0-9][0-9]*.mf') -feta_alphabet = base_glob ('feta-alphabet[0-9][0-9]*.mf') -feta_braces = base_glob ('feta-braces-[a-z].mf') -parmesan = base_glob ('parmesan[0-9][0-9]*.mf') - -fonts = feta + feta_alphabet + feta_braces + parmesan - -feta_sizes = map (lambda x: re.sub ('feta([0-9]+)', '\\1', x), feta) -otfs = map (lambda x: 'emmentaler-' + x, feta_sizes) + ['aybabtu'] - -t = map (env.TFM, fonts) -g = map (env.GTABLE, fonts) -p = map (env.PFA, fonts) -e = map (lambda x: x + '.enc', fonts) -s = map (lambda x: x + '.svg', fonts) -o = map (env.OTF, otfs) - -# Emmentaler -a = '''cat ${SOURCE} \ -$$(echo ${SOURCE} | grep -v brace | sed s/feta/parmesan/) \ -$$(echo ${SOURCE} | grep -v brace | sed s/feta/feta-alphabet/) \ -> ${TARGET}''' -otf_table = Builder (action = a, suffix = '.otf-table', - # barf - src_suffix = '.lisp') -env.Append (BUILDERS = {'OTF_TABLE': otf_table}) -f = map (env.OTF_TABLE, feta) -g = map (env.OTF_TABLE, feta_braces) - -map (lambda x: env.Depends ('feta' + x + '.otf-table', - ['parmesan' + x + '.lisp', - 'feta-alphabet' + x + '.lisp']), feta_sizes) - -map (lambda x: env.Depends ('emmentaler-' + x + '.otf', - 'feta' + x + '.otf-table'), - feta_sizes) - -map (lambda x: env.Depends ('emmentaler-' + x + '.otf', - ['feta' + x + '.pfa', - 'parmesan' + x + '.pfa', - 'feta-alphabet' + x + '.pfa']), feta_sizes) - -for i in feta_sizes: - env.Command ('emmentaler-%(i)s.pe' % locals (), 
- '$srcdir/buildscripts/gen-emmentaler-scripts.py', - '$PYTHON $srcdir/buildscripts/gen-emmentaler-scripts.py --dir=${TARGET.dir}') - -map (lambda x: env.Depends (x + '.pfa', x + '.enc'), feta) - - -# Aybabtu - -feta_braces_pfa = map (lambda x: x + '.pfa', feta_braces) - -env.AT_COPY ('aybabtu.pe.in') -env.Command ('aybabtu.fontname', '', 'echo -n aybabtu > $TARGET') -env.Command ('aybabtu.subfonts', - map (lambda x: x + '.mf', feta_braces), - 'echo ${SOURCES.filebase} > $TARGET') - -env.Command ('aybabtu.otf-table', - map (lambda x: x + '.otf-table', feta_braces), - 'cd ${TARGET.dir} && cat ${SOURCES.file} > ${TARGET.file}') - -env.Command ('aybabtu.otf-gtable', - map (lambda x: x + '.otf-gtable', feta_braces), - 'echo "(design_size . 20)" > $TARGET') - -env.Depends ('aybabtu.otf', - feta_braces_pfa - + ['aybabtu.subfonts', - 'aybabtu.fontname', - 'aybabtu.otf-table', - 'aybabtu.otf-gtable']) - -## FIXME: building only a few fonts does not seem to work anymore. -## what is essential these days, aybabtu/emmentaler are needed always? 
-mf_essential = ['feta16', 'feta20', 'parmesan16', ] -pfa_essential = map (env.PFA, mf_essential) + ['emmentaler-20.otf'] -env.Alias ('mf-essential', pfa_essential) -env.Alias ('mf-essential', 'fonts.cache-1') - -env['fonts'] = string.join (fonts) -env['feta_sizes'] = string.join (feta_sizes) - -env.Alias ('mf', pfa_essential + p + map (lambda x: x[0], o)) -env.Alias ('mf', s) -env.Alias ('mf', 'fonts.cache-1') - -install (t, env['sharedir_package_version'] + '/fonts/tfm') -install (p, env['sharedir_package_version'] + '/fonts/type1') -install (o, env['sharedir_package_version'] + '/fonts/otf') -install (e, env['sharedir_package_version'] + '/ps') -install (s, env['sharedir_package_version'] + '/fonts/svg') - diff --git a/po/SConscript b/po/SConscript deleted file mode 100644 index ac2534bd09..0000000000 --- a/po/SConscript +++ /dev/null @@ -1,15 +0,0 @@ -# -*-python-*- - -Import ('env', 'base_glob', 'install') -pos = base_glob ('*.po') -mos = map (env.MO, pos) - -install (mos, env['localedir']) - -env.Depends ('po', 'po-update') -# map (lambda x: env.Depends (x + '.mo', x + '.pom'), pos) -poms = map (env.POMERGE, pos) -env.Alias ('po-update', poms) -env.Alias ('po', mos) -#env.Alias ('all', mos) - diff --git a/po/TODO b/po/TODO index bf0e29e069..fd7f1c31e7 100644 --- a/po/TODO +++ b/po/TODO @@ -27,8 +27,7 @@ FIX OR STANDARDISE TARGETS the file may accumulate the list of obsolete translations, which may help to translate some changed entries and may be safely dropped out. 
-* because I never install LilyPond, I (check-out buildscripts/set-lily.sh) - made these links: +* because I never install LilyPond, I made these links: ln -s $LILYPOND_SOURCEDIR/po/out/nl.mo $PREFIX/usr/share/locale/nl/LC_MESSAGES/lilypond.mo diff --git a/ps/SConscript b/ps/SConscript deleted file mode 100644 index 917e9f9b7f..0000000000 --- a/ps/SConscript +++ /dev/null @@ -1,11 +0,0 @@ -# -*-python-*- - -Import ('env') - -dir = env['DESTDIR'] + env['sharedir_package_version'] + '/ps' -env.Install (dir, ['lilyponddefs.ps',]) -env.Alias ('install', dir) - -dir = env['DESTDIR'] + env['sharedir_package_version'] + '/tex' -env.Install (dir, ['music-drawing-routines.ps',]) -env.Alias ('install', dir) diff --git a/python/GNUmakefile b/python/GNUmakefile index a520a5efd6..5e706b1c50 100644 --- a/python/GNUmakefile +++ b/python/GNUmakefile @@ -1,5 +1,7 @@ depth = .. +SUBDIRS=aux + STEPMAKE_TEMPLATES=c python-module install-out po include $(depth)/make/stepmake.make diff --git a/python/SConscript b/python/SConscript deleted file mode 100644 index 52c4e1d2a1..0000000000 --- a/python/SConscript +++ /dev/null @@ -1,13 +0,0 @@ -# -*-python-*- - -Import ('env', 'install', 'src_glob') -py = src_glob ('*.py') -c = src_glob ('*.c') - -cm = map (env.SharedObject, c) - -py -cm - -install (py, env['sharedir_package_version'] + '/python') -install (cm, env['libdir_package_version'] + '/python') diff --git a/python/aux/GNUmakefile b/python/aux/GNUmakefile new file mode 100644 index 0000000000..665812d00f --- /dev/null +++ b/python/aux/GNUmakefile @@ -0,0 +1,10 @@ +depth=../.. 
+ +EXTRA_DIST_FILES = $(call src-wildcard,*.py) + +include $(depth)/make/stepmake.make + +default: + +local-clean: + rm -f *.pyc diff --git a/python/aux/buildlib.py b/python/aux/buildlib.py new file mode 100644 index 0000000000..cd99586ff8 --- /dev/null +++ b/python/aux/buildlib.py @@ -0,0 +1,42 @@ +#!@PYTHON@ + +import subprocess +import re +import sys + +verbose = False + +def read_pipe (command): + child = subprocess.Popen (command, + stdout = subprocess.PIPE, + stderr = subprocess.PIPE, + shell = True) + (output, error) = child.communicate () + code = str (child.wait ()) + if not child.stdout or child.stdout.close (): + print "pipe failed: %(command)s" % locals () + if code != '0': + error = code + ' ' + error + return (output, error) + +revision_re = re.compile ('GIT [Cc]ommittish:\s+([a-f0-9]+)') +vc_diff_cmd = 'git diff %(color_flag)s %(revision)s HEAD -- %(original)s | cat' + +def check_translated_doc (original, translated_file, translated_contents, color=False): + m = revision_re.search (translated_contents) + if not m: + sys.stderr.write ('error: ' + translated_file + \ + ": no 'GIT committish: ' found.\nPlease check " + \ + 'the whole file against the original in English, then ' + \ + 'fill in HEAD committish in the header.\n') + sys.exit (1) + revision = m.group (1) + + if color: + color_flag = '--color' + else: + color_flag = '--no-color' + c = vc_diff_cmd % vars () + if verbose: + sys.stderr.write ('running: ' + c) + return read_pipe (c) diff --git a/python/aux/manuals_definitions.py b/python/aux/manuals_definitions.py new file mode 100644 index 0000000000..e8e6d50cd9 --- /dev/null +++ b/python/aux/manuals_definitions.py @@ -0,0 +1,11 @@ +#!/usr/bin/python + +# This module is imported by check_texi_refs.py + +references_dict = { + 'lilypond': 'ruser', + 'lilypond-learning': 'rlearning', + 'lilypond-program': 'rprogram', + 'lilypond-snippets': 'rlsr', + 'music-glossary': 'rglos', + 'lilypond-internals': 'rinternals' } diff --git 
a/python/aux/mirrortree.py b/python/aux/mirrortree.py new file mode 100644 index 0000000000..0aa0bc8812 --- /dev/null +++ b/python/aux/mirrortree.py @@ -0,0 +1,62 @@ +#!@PYTHON@ + +import re +import os + +def new_link_path (link, dir, r): + l = link.split ('/') + d = dir.split ('/') + i = 0 + while i < len(d) and i < len(l) and l[i] == '..': + if r.match (d[i]): + del l[i] + else: + i += 1 + return '/'.join ([x for x in l if not r.match (x)]) + +def walk_tree (tree_roots = [], + process_dirs = '.*', + exclude_dirs = '', + find_files = '.*', + exclude_files = ''): + """Walk directory trees and.returns (dirs, symlinks, files, extra_files) tuple. + + Arguments: + tree_roots=DIRLIST use DIRLIST as tree roots list + process_dir=PATTERN only process files in directories named PATTERN + exclude_dir=PATTERN don't recurse into directories named PATTERN + find_files=PATTERN filters files which are hardlinked + exclude_files=PATTERN exclude files named PATTERN + """ + find_files_re = re.compile (find_files) + exclude_dirs_re = re.compile (exclude_dirs) + exclude_files_re = re.compile (exclude_files) + process_dirs_re = re.compile (process_dirs) + + dirs_paths = [] + symlinks_paths = [] + files_paths = [] + + for d in tree_roots: + for current_dir, dirs, files in os.walk(d): + i = 0 + while i < len(dirs): + if exclude_dirs_re.search (os.path.join (current_dir, dirs[i])): + del dirs[i] + else: + p = os.path.join (current_dir, dirs[i]) + if os.path.islink (p): + symlinks_paths.append (p) + i += 1 + if not process_dirs_re.search (current_dir): + continue + dirs_paths.append (current_dir) + for f in files: + if exclude_files_re.match (f): + continue + p = os.path.join (current_dir, f) + if os.path.islink (p): + symlinks_paths.append (p) + elif find_files_re.match (f): + files_paths.append (p) + return (dirs_paths, symlinks_paths, files_paths) diff --git a/python/aux/postprocess_html.py b/python/aux/postprocess_html.py new file mode 100644 index 0000000000..e94da79755 --- /dev/null 
+++ b/python/aux/postprocess_html.py @@ -0,0 +1,361 @@ +#!@PYTHON@ + +""" +Postprocess HTML files: +add footer, tweak links, add language selection menu. +""" +import re +import os +import time +import operator + +import langdefs + +# This is to try to make the docball not too big with almost duplicate files +# see process_links() +non_copied_pages = ['Documentation/user/out-www/lilypond-big-page', + 'Documentation/user/out-www/lilypond-internals-big-page', + 'Documentation/user/out-www/lilypond-learning-big-page', + 'Documentation/user/out-www/lilypond-program-big-page', + 'Documentation/user/out-www/music-glossary-big-page', + 'out-www/examples', + 'Documentation/topdocs', + 'Documentation/bibliography', + 'Documentation/out-www/THANKS', + 'Documentation/out-www/DEDICATION', + 'Documentation/out-www/devel', + 'input/'] + +def _doc (s): + return s + +header = r""" +""" + +footer = ''' + +''' +footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).') +# ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process +footer_report_links = _doc ('Your suggestions for the documentation are welcome, please report errors to our bug list.') + + +mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs' +suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding' + +header_tag = '' +header_tag_re = re.compile (header_tag) + +footer_tag = '' +footer_tag_re = re.compile (footer_tag) + +lang_available = _doc ("Other languages: %s.") +browser_lang = _doc ('About automatic language selection.') +browser_language_url = "/web/about/browser-language" + +LANGUAGES_TEMPLATE = ''' +

    + %(language_available)s +
    + %(browser_language)s +

    +''' + + +html_re = re.compile ('(.*?)(?:[.]([^/.]*))?[.]html$') +pages_dict = {} + +def build_pages_dict (filelist): + """Build dictionary of available translations of each page""" + global pages_dict + for f in filelist: + m = html_re.match (f) + if m: + g = m.groups() + if len (g) <= 1 or g[1] == None: + e = '' + else: + e = g[1] + if not g[0] in pages_dict: + pages_dict[g[0]] = [e] + else: + pages_dict[g[0]].append (e) + +def source_links_replace (m, source_val): + return 'href="' + os.path.join (source_val, m.group (1)) + '"' + +splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|\ +Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|\ +lilypond-learning))/') + +snippets_ref_re = re.compile (r'href="(\.\./)?lilypond-snippets') +user_ref_re = re.compile ('href="(?:\.\./)?lilypond\ +(-internals|-learning|-program|(?!-snippets))') + +docindex_link_re = re.compile (r'href="index.html"') + + +## Windows does not support symlinks. +# This function avoids creating symlinks for splitted HTML manuals +# Get rid of symlinks in GNUmakefile.in (local-WWW-post) +# this also fixes missing PNGs only present in translated docs +def hack_urls (s, prefix): + if splitted_docs_re.match (prefix): + s = re.sub ('(href|src)="(../lily-.*?|.*?[.]png)"', '\\1="../\\2"', s) + + # fix xrefs between documents in different directories ad hoc + if 'user/out-www/lilypond' in prefix: + s = snippets_ref_re.sub ('href="source/input/lsr/lilypond-snippets', s) + elif 'input/lsr' in prefix: + s = user_ref_re.sub ('href="source/Documentation/user/lilypond\\1', s) + + # we also need to replace in the lsr, which is already processed above! 
+ if 'input/' in prefix or 'Documentation/topdocs' in prefix: + # fix the link from the regtest, lsr and topdoc pages to the doc index + # (rewrite prefix to obtain the relative path of the doc index page) + rel_link = re.sub (r'out-www/.*$', '', prefix) + rel_link = re.sub (r'[^/]*/', '../', rel_link) + if 'input/regression' in prefix: + indexfile = "Documentation/devel" + else: + indexfile = "index" + s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s) + + source_path = os.path.join (os.path.dirname (prefix), 'source') + if not os.path.islink (source_path): + return s + source_val = os.readlink (source_path) + return re.sub ('href="source/(.*?)"', lambda m: source_links_replace (m, source_val), s) + +body_tag_re = re.compile ('(?i)]*)>') +html_tag_re = re.compile ('(?i)') +doctype_re = re.compile ('(?i)\n' +css_re = re.compile ('(?i)]*)href="[^">]*?lilypond.*\.css"([^>]*)>') +end_head_tag_re = re.compile ('(?i)') +css_link = """ + + + +""" + + +def add_header (s, prefix): + """Add header (, doctype and CSS)""" + if header_tag_re.search (s) == None: + body = '' + (s, n) = body_tag_re.subn (body + header, s, 1) + if not n: + (s, n) = html_tag_re.subn ('' + header, s, 1) + if not n: + s = header + s + + s = header_tag + '\n' + s + + if doctype_re.search (s) == None: + s = doctype + s + + if css_re.search (s) == None: + depth = (prefix.count ('/') - 1) * '../' + s = end_head_tag_re.sub ((css_link % {'rel': depth}) + '', s) + return s + +title_tag_re = re.compile ('.*?(.*?)', re.DOTALL) +AT_web_title_re = re.compile ('@WEB-TITLE@') + +def add_title (s): + # urg + # maybe find first node? 
+ fallback_web_title = '-- --' + m = title_tag_re.match (s) + if m: + fallback_web_title = m.group (1) + s = AT_web_title_re.sub (fallback_web_title, s) + return s + +footer_insert_re = re.compile ('') +end_body_re = re.compile ('(?i)') +end_html_re = re.compile ('(?i)') + +def add_footer (s, footer_text): + """add footer""" + (s, n) = footer_insert_re.subn (footer_text + '\n' + '', s, 1) + if not n: + (s, n) = end_body_re.subn (footer_text + '\n' + '', s, 1) + if not n: + (s, n) = end_html_re.subn (footer_text + '\n' + '', s, 1) + if not n: + s += footer_text + '\n' + return s + +def find_translations (prefix, lang_ext): + """find available translations of a page""" + available = [] + missing = [] + for l in langdefs.LANGUAGES: + e = l.webext + if lang_ext != e: + if e in pages_dict[prefix]: + available.append (l) + elif lang_ext == '' and l.enabled and reduce (operator.and_, + [not prefix.startswith (s) + for s in non_copied_pages]): + # English version of missing translated pages will be written + missing.append (e) + return available, missing + +online_links_re = re.compile ('''(href|src)=['"]\ +((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\ +([.]html)(#[^"']*|)['"]''') +offline_links_re = re.compile ('href=[\'"]\ +((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]') +big_page_name_re = re.compile ('''(.+?)-big-page''') + +def process_i18n_big_page_links (match, prefix, lang_ext): + big_page_name = big_page_name_re.match (match.group (1)) + if big_page_name: + destination_path = os.path.normpath (os.path.join (os.path.dirname (prefix), + big_page_name.group (0))) + if not lang_ext in pages_dict[destination_path]: + return match.group (0) + return 'href="' + match.group (1) + '.' + lang_ext \ + + match.group (2) + match.group (3) + '"' + +def process_links (s, prefix, lang_ext, file_name, missing, target): + page_flavors = {} + if target == 'online': + # Strip .html, suffix for auto language selection (content + # negotiation). 
The menu must keep the full extension, so do + # this before adding the menu. + page_flavors[file_name] = \ + [lang_ext, online_links_re.sub ('\\1="\\2\\4"', s)] + elif target == 'offline': + # in LANG doc index: don't rewrite .html suffixes + # as not all .LANG.html pages exist; + # the doc index should be translated and contain links with the right suffixes + if prefix == 'Documentation/out-www/index': + page_flavors[file_name] = [lang_ext, s] + elif lang_ext == '': + page_flavors[file_name] = [lang_ext, s] + for e in missing: + page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = \ + [e, offline_links_re.sub ('href="\\1.' + e + '\\2\\3"', s)] + else: + # For saving bandwidth and disk space, we don't duplicate big pages + # in English, so we must process translated big pages links differently. + if 'big-page' in prefix: + page_flavors[file_name] = \ + [lang_ext, + offline_links_re.sub \ + (lambda match: process_i18n_big_page_links (match, prefix, lang_ext), + s)] + else: + page_flavors[file_name] = \ + [lang_ext, + offline_links_re.sub ('href="\\1.' 
+ lang_ext + '\\2\\3"', s)] + return page_flavors + +def add_menu (page_flavors, prefix, available, target, translation): + for k in page_flavors: + language_menu = '' + languages = '' + if page_flavors[k][0] != '': + t = translation[page_flavors[k][0]] + else: + t = _doc + for lang in available: + lang_file = lang.file_name (os.path.basename (prefix), '.html') + if language_menu != '': + language_menu += ', ' + language_menu += '%s' % (lang_file, t (lang.name)) + if target == 'offline': + browser_language = '' + elif target == 'online': + browser_language = t (browser_lang) % browser_language_url + if language_menu: + language_available = t (lang_available) % language_menu + languages = LANGUAGES_TEMPLATE % vars () + page_flavors[k][1] = add_footer (page_flavors[k][1], languages) + return page_flavors + + +def process_html_files (package_name = '', + package_version = '', + target = 'offline', + name_filter = lambda s: s): + """Add header, footer and tweak links to a number of HTML files + + Arguments: + package_name=NAME set package_name to NAME + package_version=VERSION set package version to VERSION + targets=offline|online set page processing depending on the target + offline is for reading HTML pages locally + online is for hosting the HTML pages on a website with content + negotiation + name_filter a HTML file name filter + """ + translation = langdefs.translation + localtime = time.strftime ('%c %Z', time.localtime (time.time ())) + + if "http://" in mail_address: + mail_address_url = mail_address + else: + mail_address_url= 'mailto:' + mail_address + + versiontup = package_version.split ('.') + branch_str = _doc ('stable-branch') + if int (versiontup[1]) % 2: + branch_str = _doc ('development-branch') + + # Initialize dictionaries for string formatting + subst = {} + subst[''] = dict ([i for i in globals ().items() if type (i[1]) is str]) + subst[''].update (dict ([i for i in locals ().items() if type (i[1]) is str])) + for l in translation: + e = 
langdefs.LANGDICT[l].webext + if e: + subst[e] = {} + for name in subst['']: + subst[e][name] = translation[l] (subst[''][name]) + # Do deeper string formatting as early as possible, + # so only one '%' formatting pass is needed later + for e in subst: + subst[e]['footer_name_version'] = subst[e]['footer_name_version'] % subst[e] + subst[e]['footer_report_links'] = subst[e]['footer_report_links'] % subst[e] + + for prefix, ext_list in pages_dict.items (): + for lang_ext in ext_list: + file_name = langdefs.lang_file_name (prefix, lang_ext, '.html') + in_f = open (file_name) + s = in_f.read() + in_f.close() + + s = s.replace ('%', '%%') + s = hack_urls (s, prefix) + s = add_header (s, prefix) + + ### add footer + if footer_tag_re.search (s) == None: + s = add_footer (s, footer_tag + footer) + + available, missing = find_translations (prefix, lang_ext) + page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target) + # Add menu after stripping: must not have autoselection for language menu. + page_flavors = add_menu (page_flavors, prefix, available, target, translation) + for k in page_flavors: + page_flavors[k][1] = page_flavors[k][1] % subst[page_flavors[k][0]] + out_f = open (name_filter (k), 'w') + out_f.write (page_flavors[k][1]) + out_f.close() + # if the page is translated, a .en.html symlink is necessary for content negotiation + if target == 'online' and ext_list != ['']: + os.symlink (os.path.basename (prefix) + '.html', name_filter (prefix + '.en.html')) diff --git a/scm/SConscript b/scm/SConscript deleted file mode 100644 index 396bafba8e..0000000000 --- a/scm/SConscript +++ /dev/null @@ -1,5 +0,0 @@ -# -*-python-*- - -Import ('env', 'install', 'src_glob') -sources = src_glob ('*.scm') -install (sources, env['sharedir_package_version'] + '/scm') diff --git a/scm/framework-ps.scm b/scm/framework-ps.scm index 8ccd3c04ed..c091a44f47 100644 --- a/scm/framework-ps.scm +++ b/scm/framework-ps.scm @@ -290,7 +290,7 @@ (if (and (not embed) (equal? 
'regular (stat:type (stat full-name))) (equal? name (ly:ttf-ps-name full-name))) - (set! embed (font-file-as-ps-string name full-name))) + (set! embed (font-file-as-ps-string name full-name 0))) (if (or (equal? "." f) (equal? ".." f)) #t diff --git a/scripts/GNUmakefile b/scripts/GNUmakefile index bae147532a..1813590ff1 100644 --- a/scripts/GNUmakefile +++ b/scripts/GNUmakefile @@ -1,5 +1,7 @@ depth = .. +SUBDIRS=aux build + SEXECUTABLES=convert-ly lilypond-book abc2ly etf2ly midi2ly lilypond-invoke-editor musicxml2ly lilysong lilymidi STEPMAKE_TEMPLATES=script help2man po diff --git a/scripts/SConscript b/scripts/SConscript deleted file mode 100644 index a69a637a83..0000000000 --- a/scripts/SConscript +++ /dev/null @@ -1,10 +0,0 @@ -# -*-python-*- - -Import ('env', 'install', 'src_glob') -sources = src_glob ('*.py') -scripts = map (env.AT_COPY, sources) - -install (scripts, env['bindir']) - -po = env.Command ('lilypond.po', sources, env['pocommand']) -env.Alias ('po-update', po) diff --git a/scripts/aux/GNUmakefile b/scripts/aux/GNUmakefile new file mode 100644 index 0000000000..a80126d541 --- /dev/null +++ b/scripts/aux/GNUmakefile @@ -0,0 +1,8 @@ +depth=../.. + +EXTRA_DIST_FILES = $(call src-wildcard,*.sh) $(call src-wildcard,*.py) +EXTRA_DIST_FILES += pfx2ttf.fontforge + +include $(depth)/make/stepmake.make + +default: diff --git a/scripts/aux/build-coverage.sh b/scripts/aux/build-coverage.sh new file mode 100755 index 0000000000..369b526e84 --- /dev/null +++ b/scripts/aux/build-coverage.sh @@ -0,0 +1,67 @@ +#!/bin/sh + +if test "$1" == "--fresh"; then + fresh=yes +fi + +if test ! 
-f config-cov.make; then + fresh=yes +fi + +if test "$fresh" = "yes"; +then + ./configure --enable-config=cov --disable-optimising \ + && make conf=cov -j2 clean \ + && perl -i~ -pe 's/-pipe /-fprofile-arcs -ftest-coverage -pipe /g' config-cov.make \ + && perl -i~ -pe 's/ -ldl / -lgcov -ldl /g' config-cov.make +else + find -name '*.gcda' -exec rm '{}' ';' +fi + +mkdir -p scripts/out-cov/ +touch scripts/out-cov/midi2ly scripts/out-cov/midi2ly.1 +make conf=cov -j2 && \ + make conf=cov test-clean OUT_TEST=testcov LILYPOND_JOBS= && \ + make conf=cov test OUT_TEST=testcov LILYPOND_JOBS='-dtrace-scheme-coverage ' + +if test "$?" != "0"; then + tail -100 out-cov/test-run.log + exit 1 +fi + +depth=../.. +resultdir=out/coverage-results + +rm -rf $resultdir +mkdir $resultdir +cd $resultdir + +ln $depth/lily/* . +ln $depth/scm/*.scm . +mv $depth/input/regression/out-testcov/*.scm.cov . +ln $depth/ly/*.ly . +ln $depth/lily/out-cov/*[ch] . +mkdir include +ln $depth/lily/include/* include/ +ln $depth/flower/include/* include/ +for a in *[cl] *.yy +do + gcov -o $depth/lily/out-cov/ -p $a > $a.gcov-summary +done + +$depth/scripts/aux/coverage.py --uncovered *.cc > uncovered.txt +$depth/scripts/aux/coverage.py --hotspots *.cc > hotspots.txt +$depth/scripts/aux/coverage.py --summary *.cc > summary.txt +$depth/scripts/aux/coverage.py --uncovered *.scm > uncovered-scheme.txt + +head -20 summary.txt + +cat < long-score.ly << EOF +\version "2.10.0" +foo = \new Staff \new Voice \repeat unfold 50 \relative { c4 d8[ d16( e]~ e16[ e e) f] g8 } +\score { + \new ChoirStaff << + \foo \foo \foo \foo + \foo \foo \foo \foo + + >> + \midi {} + \layout {} +} +EOF + +rm gmon.sum + +exe=$depth/out-prof/bin/lilypond + +## todo: figure out representative sample. 
+files="wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 mozart-hrn-3 mozart-hrn-3 long-score" + + + +$exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \ + -I $depth/input/mutopia/W.A.Mozart/ \ + $files + + +for a in *.profile; do + echo $a + cat $a +done + +echo 'running gprof' +gprof $exe > profile + +exit 0 + + +## gprof -s takes forever. +for a in seq 1 3; do + for f in $files ; do + $exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \ + -I $depth/input/mutopia/W.A.Mozart/ \ + $f + + echo 'running gprof' + if test -f gmon.sum ; then + gprof -s $exe gmon.out gmon.sum + else + mv gmon.out gmon.sum + fi + done +done + +gprof $exe gmon.sum > profile diff --git a/scripts/aux/check_texi_refs.py b/scripts/aux/check_texi_refs.py new file mode 100755 index 0000000000..dff7e334f1 --- /dev/null +++ b/scripts/aux/check_texi_refs.py @@ -0,0 +1,521 @@ +#!/usr/bin/env python + +""" +check_texi_refs.py +Interactive Texinfo cross-references checking and fixing tool + +""" + + +import sys +import re +import os +import optparse +import imp + +outdir = 'out-www' + +log = sys.stderr +stdout = sys.stdout + +file_not_found = 'file not found in include path' + +warn_not_fixed = '*** Warning: this broken x-ref has not been fixed!\n' + +opt_parser = optparse.OptionParser (usage='check_texi_refs.py [OPTION]... FILE', + description='''Check and fix \ +cross-references in a collection of Texinfo +documents heavily cross-referenced each other. 
+''') + +opt_parser.add_option ('-a', '--auto-fix', + help="Automatically fix cross-references whenever \ +it is possible", + action='store_true', + dest='auto_fix', + default=False) + +opt_parser.add_option ('-b', '--batch', + help="Do not run interactively", + action='store_false', + dest='interactive', + default=True) + +opt_parser.add_option ('-c', '--check-comments', + help="Also check commented out x-refs", + action='store_true', + dest='check_comments', + default=False) + +opt_parser.add_option ('-p', '--check-punctuation', + help="Check punctuation after x-refs", + action='store_true', + dest='check_punctuation', + default=False) + +opt_parser.add_option ("-I", '--include', help="add DIR to include path", + metavar="DIR", + action='append', dest='include_path', + default=[os.path.abspath (os.getcwd ())]) + +(options, files) = opt_parser.parse_args () + +class InteractionError (Exception): + pass + + +manuals_defs = imp.load_source ('manuals_defs', files[0]) +manuals = {} + +def find_file (name, prior_directory='.'): + p = os.path.join (prior_directory, name) + out_p = os.path.join (prior_directory, outdir, name) + if os.path.isfile (p): + return p + elif os.path.isfile (out_p): + return out_p + + # looking for file in include_path + for d in options.include_path: + p = os.path.join (d, name) + if os.path.isfile (p): + return p + + # file not found in include_path: looking in `outdir' subdirs + for d in options.include_path: + p = os.path.join (d, outdir, name) + if os.path.isfile (p): + return p + + raise EnvironmentError (1, file_not_found, name) + + +exit_code = 0 + +def set_exit_code (n): + global exit_code + exit_code = max (exit_code, n) + + +if options.interactive: + try: + import readline + except: + pass + + def yes_prompt (question, default=False, retries=3): + d = {True: 'y', False: 'n'}.get (default, False) + while retries: + a = raw_input ('%s [default: %s]' % (question, d) + '\n') + if a.lower ().startswith ('y'): + return True + if a.lower 
().startswith ('n'): + return False + if a == '' or retries < 0: + return default + stdout.write ("Please answer yes or no.\n") + retries -= 1 + + def search_prompt (): + """Prompt user for a substring to look for in node names. + +If user input is empty or matches no node name, return None, +otherwise return a list of (manual, node name, file) tuples. + +""" + substring = raw_input ("Enter a substring to search in node names \ +(press Enter to skip this x-ref):\n") + if not substring: + return None + substring = substring.lower () + matches = [] + for k in manuals: + matches += [(k, node, manuals[k]['nodes'][node][0]) + for node in manuals[k]['nodes'] + if substring in node.lower ()] + return matches + +else: + def yes_prompt (question, default=False, retries=3): + return default + + def search_prompt (): + return None + + +ref_re = re.compile \ + ('@(ref|ruser|rlearning|rprogram|rglos)(?:\\{(?P[^,\\\\\\}]+?)|\ +named\\{(?P[^,\\\\]+?),(?P[^,\\\\\\}]+?))\\}(?P.)', + re.DOTALL) +node_include_re = re.compile (r'(?m)^@(node|include)\s+(.+?)$') + +whitespace_re = re.compile (r'\s+') +line_start_re = re.compile ('(?m)^') + +def which_line (index, newline_indices): + """Calculate line number of a given string index + +Return line number of string index index, where +newline_indices is an ordered iterable of all newline indices. +""" + inf = 0 + sup = len (newline_indices) - 1 + n = len (newline_indices) + while inf + 1 != sup: + m = (inf + sup) / 2 + if index >= newline_indices [m]: + inf = m + else: + sup = m + return inf + 1 + + +comments_re = re.compile ('(? 
comments_boundaries[k][0] + and end <= comments_boundaries[k][1]): + return True + elif end <= comments_boundaries[k][0]: + return False + return False + + +def read_file (f, d): + s = open (f).read () + base = os.path.basename (f) + dir = os.path.dirname (f) + + d['contents'][f] = s + + d['newline_indices'][f] = [m.end () for m in line_start_re.finditer (s)] + if options.check_comments: + d['comments_boundaries'][f] = [] + else: + d['comments_boundaries'][f] = calc_comments_boundaries (s) + + for m in node_include_re.finditer (s): + if m.group (1) == 'node': + line = which_line (m.start (), d['newline_indices'][f]) + d['nodes'][m.group (2)] = (f, line) + + elif m.group (1) == 'include': + try: + p = find_file (m.group (2), dir) + except EnvironmentError, (errno, strerror): + if strerror == file_not_found: + continue + else: + raise + read_file (p, d) + + +def read_manual (name): + """Look for all node names and cross-references in a Texinfo document + +Return a (manual, dictionary) tuple where manual is the cross-reference +macro name defined by references_dict[name], and dictionary +has the following keys: + + 'nodes' is a dictionary of `node name':(file name, line number), + + 'contents' is a dictionary of file:`full file contents', + + 'newline_indices' is a dictionary of +file:[list of beginning-of-line string indices], + + 'comments_boundaries' is a list of (start, end) tuples, +which contain string indices of start and end of each comment. + +Included files that can be found in the include path are processed too. 
+ +""" + d = {} + d['nodes'] = {} + d['contents'] = {} + d['newline_indices'] = {} + d['comments_boundaries'] = {} + manual = manuals_defs.references_dict.get (name, '') + try: + f = find_file (name + '.tely') + except EnvironmentError, (errno, strerror): + if not strerror == file_not_found: + raise + else: + try: + f = find_file (name + '.texi') + except EnvironmentError, (errno, strerror): + if strerror == file_not_found: + sys.stderr.write (name + '.{texi,tely}: ' + + file_not_found + '\n') + return (manual, d) + else: + raise + + log.write ("Processing manual %s (%s)\n" % (f, manual)) + read_file (f, d) + return (manual, d) + + +log.write ("Reading files...\n") + +manuals = dict ([read_manual (name) + for name in manuals_defs.references_dict.keys ()]) + +ref_fixes = set () +bad_refs_count = 0 +fixes_count = 0 + +def add_fix (old_type, old_ref, new_type, new_ref): + ref_fixes.add ((old_type, old_ref, new_type, new_ref)) + + +def lookup_fix (r): + found = [] + for (old_type, old_ref, new_type, new_ref) in ref_fixes: + if r == old_ref: + found.append ((new_type, new_ref)) + return found + + +def preserve_linebreak (text, linebroken): + if linebroken: + if ' ' in text: + text = text.replace (' ', '\n', 1) + n = '' + else: + n = '\n' + else: + n = '' + return (text, n) + + +def choose_in_numbered_list (message, string_list, sep=' ', retries=3): + S = set (string_list) + S.discard ('') + string_list = list (S) + numbered_list = sep.join ([str (j + 1) + '. 
' + string_list[j] + for j in range (len (string_list))]) + '\n' + t = retries + while t > 0: + value = '' + stdout.write (message + + "(press Enter to discard and start a new search)\n") + input = raw_input (numbered_list) + if not input: + return '' + try: + value = string_list[int (input) - 1] + except IndexError: + stdout.write ("Error: index number out of range\n") + except ValueError: + matches = [input in v for v in string_list] + n = matches.count (True) + if n == 0: + stdout.write ("Error: input matches no item in the list\n") + elif n > 1: + stdout.write ("Error: ambiguous input (matches several items \ +in the list)\n") + else: + value = string_list[matches.index (True)] + if value: + return value + t -= 1 + raise InteractionError ("%d retries limit exceeded" % retries) + +refs_count = 0 + +def check_ref (manual, file, m): + global fixes_count, bad_refs_count, refs_count + refs_count += 1 + bad_ref = False + fixed = True + type = m.group (1) + original_name = m.group ('ref') or m.group ('refname') + name = whitespace_re.sub (' ', original_name). strip () + newline_indices = manuals[manual]['newline_indices'][file] + line = which_line (m.start (), newline_indices) + linebroken = '\n' in original_name + original_display_name = m.group ('display') + next_char = m.group ('last') + if original_display_name: # the xref has an explicit display name + display_linebroken = '\n' in original_display_name + display_name = whitespace_re.sub (' ', original_display_name). 
strip () + commented_out = is_commented_out \ + (m.start (), m.end (), manuals[manual]['comments_boundaries'][file]) + useful_fix = not outdir in file + + # check puncuation after x-ref + if options.check_punctuation and not next_char in '.,;:!?': + stdout.write ("Warning: %s: %d: `%s': x-ref \ +not followed by punctuation\n" % (file, line, name)) + + # validate xref + explicit_type = type + new_name = name + + if type != 'ref' and type == manual and not commented_out: + if useful_fix: + fixed = False + bad_ref = True + stdout.write ("\n%s: %d: `%s': external %s x-ref should be internal\n" + % (file, line, name, type)) + if options.auto_fix or yes_prompt ("Fix this?"): + type = 'ref' + + if type == 'ref': + explicit_type = manual + + if not name in manuals[explicit_type]['nodes'] and not commented_out: + bad_ref = True + fixed = False + stdout.write ('\n') + if type == 'ref': + stdout.write ("%s: %d: `%s': wrong internal x-ref\n" + % (file, line, name)) + else: + stdout.write ("%s: %d: `%s': wrong external `%s' x-ref\n" + % (file, line, name, type)) + # print context + stdout.write ('--\n' + manuals[manual]['contents'][file] + [newline_indices[max (0, line - 2)]: + newline_indices[min (line + 3, + len (newline_indices) - 1)]] + + '--\n') + + # try to find the reference in other manuals + found = [] + for k in [k for k in manuals if k != explicit_type]: + if name in manuals[k]['nodes']: + if k == manual: + found = ['ref'] + stdout.write (" found as internal x-ref\n") + break + else: + found.append (k) + stdout.write (" found as `%s' x-ref\n" % k) + + if (len (found) == 1 + and (options.auto_fix or yes_prompt ("Fix this x-ref?"))): + add_fix (type, name, found[0], name) + type = found[0] + fixed = True + + elif len (found) > 1 and useful_fix: + if options.interactive or options.auto_fix: + stdout.write ("* Several manuals contain this node name, \ +cannot determine manual automatically.\n") + if options.interactive: + t = choose_in_numbered_list ("Choose manual for 
this x-ref by \ +index number or beginning of name:\n", found) + if t: + add_fix (type, name, t, name) + type = t + fixed = True + + if not fixed: + # try to find a fix already made + found = lookup_fix (name) + + if len (found) == 1: + stdout.write ("Found one previous fix: %s `%s'\n" % found[0]) + if options.auto_fix or yes_prompt ("Apply this fix?"): + type, new_name = found[0] + fixed = True + + elif len (found) > 1: + if options.interactive or options.auto_fix: + stdout.write ("* Several previous fixes match \ +this node name, cannot fix automatically.\n") + if options.interactive: + concatened = choose_in_numbered_list ("Choose new manual \ +and x-ref by index number or beginning of name:\n", [''.join ([i[0], ' ', i[1]]) + for i in found], + sep='\n') + if concatened: + type, new_name = concatenated.split (' ', 1) + fixed = True + + if not fixed: + # all previous automatic fixing attempts failed, + # ask user for substring to look in node names + while True: + node_list = search_prompt () + if node_list == None: + if options.interactive: + stdout.write (warn_not_fixed) + break + elif not node_list: + stdout.write ("No matched node names.\n") + else: + concatenated = choose_in_numbered_list ("Choose \ +node name and manual for this x-ref by index number or beginning of name:\n", \ + [' '.join ([i[0], i[1], '(in %s)' % i[2]]) + for i in node_list], + sep='\n') + if concatenated: + t, z = concatenated.split (' ', 1) + new_name = z.split (' (in ', 1)[0] + add_fix (type, name, t, new_name) + type = t + fixed = True + break + + if fixed and type == manual: + type = 'ref' + bad_refs_count += int (bad_ref) + if bad_ref and not useful_fix: + stdout.write ("*** Warning: this file is automatically generated, \ +please fix the code source instead of generated documentation.\n") + + # compute returned string + if new_name == name: + if bad_ref and (options.interactive or options.auto_fix): + # only the type of the ref was fixed + fixes_count += int (fixed) + if 
original_display_name: + return ('@%snamed{%s,%s}' % (type, original_name, original_display_name)) + next_char + else: + return ('@%s{%s}' % (type, original_name)) + next_char + else: + fixes_count += int (fixed) + (ref, n) = preserve_linebreak (new_name, linebroken) + if original_display_name: + if bad_ref: + stdout.write ("Current display name is `%s'\n") + display_name = raw_input \ + ("Enter a new display name or press enter to keep the existing name:\n") \ + or display_name + (display_name, n) = preserve_linebreak (display_name, display_linebroken) + else: + display_name = original_display_name + return ('@%snamed{%s,%s}' % (type, ref, display_name)) + \ + next_char + n + else: + return ('@%s{%s}' % (type, ref)) + next_char + n + + +log.write ("Checking cross-references...\n") + +try: + for key in manuals: + for file in manuals[key]['contents']: + s = ref_re.sub (lambda m: check_ref (key, file, m), + manuals[key]['contents'][file]) + if s != manuals[key]['contents'][file]: + open (file, 'w').write (s) +except KeyboardInterrupt: + log.write ("Operation interrupted, exiting.\n") + sys.exit (2) +except InteractionError, instance: + log.write ("Operation refused by user: %s\nExiting.\n" % instance) + sys.exit (3) + +log.write ("Done: %d x-refs found, %d bad x-refs found, fixed %d.\n" % + (refs_count, bad_refs_count, fixes_count)) diff --git a/scripts/aux/check_translation.py b/scripts/aux/check_translation.py new file mode 100755 index 0000000000..090b1fbe8a --- /dev/null +++ b/scripts/aux/check_translation.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python + +import __main__ +import optparse +import os +import sys + +import langdefs +import buildlib + +verbose = 0 +use_colors = False +lang = 'C' +C = lang + +def dir_lang (file, lang, lang_dir_index): + path_components = file.split ('/') + path_components[lang_dir_index] = lang + return os.path.join (*path_components) + +def do_file (file_name, lang_codes, buildlib): + if verbose: + sys.stderr.write ('%s...\n' % 
file_name) + split_file_name = file_name.split ('/') + d1, d2 = split_file_name[0:2] + if d1 in lang_codes: + check_lang = d1 + lang_dir_index = 0 + elif d2 in lang_codes: + check_lang = d2 + lang_dir_index = 1 + else: + check_lang = lang + if check_lang == C: + raise Exception ('cannot determine language for ' + file_name) + + original = dir_lang (file_name, '', lang_dir_index) + translated_contents = open (file_name).read () + (diff_string, error) \ + = buildlib.check_translated_doc (original, + file_name, + translated_contents, + color=use_colors and not update_mode) + + if error: + sys.stderr.write ('warning: %s: %s' % (file_name, error)) + + if update_mode: + if error or len (diff_string) >= os.path.getsize (original): + buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + original) + elif diff_string: + diff_file = original + '.diff' + f = open (diff_file, 'w') + f.write (diff_string) + f.close () + buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + diff_file) + os.remove (diff_file) + else: + sys.stdout.write (diff_string) + +def usage (): + sys.stdout.write (r''' +Usage: +check-translation [--language=LANG] [--verbose] [--update] FILE... + +This script is licensed under the GNU GPL. 
+''') + +def do_options (): + global lang, verbose, update_mode, use_colors + + p = optparse.OptionParser (usage="check-translation [--language=LANG] [--verbose] FILE...", + description="This script is licensed under the GNU GPL.") + p.add_option ("--language", + action='store', + default='site', + dest="language") + p.add_option ("--no-color", + action='store_false', + default=True, + dest="color", + help="do not print ANSI-cooured output") + p.add_option ("--verbose", + action='store_true', + default=False, + dest="verbose", + help="print details, including executed shell commands") + p.add_option ('-u', "--update", + action='store_true', + default=False, + dest='update_mode', + help='call $EDITOR to update the translation') + + (options, files) = p.parse_args () + verbose = options.verbose + lang = options.language + use_colors = options.color + update_mode = options.update_mode + + return files + +def main (): + global update_mode, text_editor + + files = do_options () + if 'EDITOR' in os.environ: + text_editor = os.environ['EDITOR'] + else: + update_mode = False + + buildlib.verbose = verbose + + for i in files: + do_file (i, langdefs.LANGDICT.keys (), buildlib) + +if __name__ == '__main__': + main () diff --git a/scripts/aux/coverage.py b/scripts/aux/coverage.py new file mode 100755 index 0000000000..9ff86b0ef1 --- /dev/null +++ b/scripts/aux/coverage.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python + +import os +import glob +import re +import sys +import optparse + +#File 'accidental-engraver.cc' +#Lines executed:87.70% of 252 + +def summary (args): + results = [] + for f in args: + str = open (f).read () + m = re.search ("File '([^']+.cc)'\s*Lines executed:([0-9.]+)% of ([0-9]+)", str) + + if m and '/usr/lib' in m.group (1): + continue + + if m: + cov = float (m.group (2)) + lines = int (m.group (3)) + pain = lines * (100.0 - cov) + file = m.group (1) + tup = (pain, locals ().copy()) + + results.append(tup) + + results.sort () + results.reverse() + + print 
'files sorted by number of untested lines (decreasing)' + print + print '%5s (%6s): %s' % ('cov %', 'lines', 'file') + print '----------------------------------------------' + + for (pain, d) in results: + print '%(cov)5.2f (%(lines)6d): %(file)s' % d + +class Chunk: + def __init__ (self, range, coverage_count, all_lines, file): + assert coverage_count >= 0 + assert type (range) == type (()) + + self.coverage_count = coverage_count + self.range = range + self.all_lines = all_lines + self.file = file + + def length (self): + return self.range[1] - self.range[0] + + def text (self): + return ''.join ([l[2] for l in self.lines()]) + + def lines (self): + return self.all_lines[self.range[0]: + self.range[1]] + def widen (self): + self.range = (min (self.range[0] -1, 0), + self.range[0] +1) + def write (self): + print 'chunk in', self.file + for (c, n, l) in self.lines (): + cov = '%d' % c + if c == 0: + cov = '#######' + elif c < 0: + cov = '' + sys.stdout.write ('%8s:%8d:%s' % (cov, n, l)) + + def uncovered_score (self): + return self.length () + +class SchemeChunk (Chunk): + def uncovered_score (self): + text = self.text () + if (text.startswith ('(define ') + and not text.startswith ('(define (')): + return 0 + + if text.startswith ('(use-modules '): + return 0 + + if (text.startswith ('(define-public ') + and not text.startswith ('(define-public (')): + return 0 + + return len ([l for (c,n,l) in self.lines() if (c == 0)]) + +def read_gcov (f): + ls = [] + + in_lines = [l for l in open (f).readlines ()] + (count_len, line_num_len) = tuple (map (len, in_lines[0].split (':')[:2])) + + for l in in_lines: + c = l[:count_len].strip () + l = l[count_len+1:] + n = int (l[:line_num_len].strip ()) + + if n == 0: + continue + + if '#' in c: + c = 0 + elif c == '-': + c = -1 + else: + c = int (c) + + l = l[line_num_len+1:] + + ls.append ((c,n,l)) + + return ls + +def get_c_chunks (ls, file): + chunks = [] + chunk = [] + + last_c = -1 + for (c, n, l) in ls: + if not (c == 
last_c or c < 0 and l != '}\n'): + if chunk and last_c >= 0: + nums = [n-1 for (n, l) in chunk] + chunks.append (Chunk ((min (nums), max (nums)+1), + last_c, ls, file)) + chunk = [] + + chunk.append ((n,l)) + if c >= 0: + last_c = c + + return chunks + +def get_scm_chunks (ls, file): + chunks = [] + chunk = [] + + def new_chunk (): + if chunk: + nums = [n-1 for (n, l) in chunk] + chunks.append (SchemeChunk ((min (nums), max (nums)+1), + max (last_c, 0), ls, file)) + chunk[:] = [] + + last_c = -1 + for (cov_count, line_number, line) in ls: + if line.startswith ('('): + new_chunk () + last_c = -1 + + chunk.append ((line_number, line)) + if cov_count >= 0: + last_c = cov_count + + return chunks + +def widen_chunk (ch, ls): + a -= 1 + b += 1 + + return [(n, l) for (c, n, l) in ls[a:b]] + + +def extract_chunks (file): + try: + ls = read_gcov (file) + except IOError, s : + print s + return [] + + cs = [] + if 'scm' in file: + cs = get_scm_chunks (ls, file) + else: + cs = get_c_chunks (ls, file) + return cs + + +def filter_uncovered (chunks): + def interesting (c): + if c.coverage_count > 0: + return False + + t = c.text() + for stat in ('warning', 'error', 'print', 'scm_gc_mark'): + if stat in t: + return False + return True + + return [c for c in chunks if interesting (c)] + + +def main (): + p = optparse.OptionParser (usage="usage coverage.py [options] files", + description="") + p.add_option ("--summary", + action='store_true', + default=False, + dest="summary") + + p.add_option ("--hotspots", + default=False, + action='store_true', + dest="hotspots") + + p.add_option ("--uncovered", + default=False, + action='store_true', + dest="uncovered") + + + (options, args) = p.parse_args () + + + if options.summary: + summary (['%s.gcov-summary' % s for s in args]) + + if options.uncovered or options.hotspots: + chunks = [] + for a in args: + name = a + if name.endswith ('scm'): + name += '.cov' + else: + name += '.gcov' + + chunks += extract_chunks (name) + + if 
options.uncovered: + chunks = filter_uncovered (chunks) + chunks = [(c.uncovered_score (), c) for c in chunks if c.uncovered_score() > 0] + elif options.hotspots: + chunks = [((c.coverage_count, -c.length()), c) for c in chunks] + + + chunks.sort () + chunks.reverse () + for (score, c) in chunks: + c.write () + + + +if __name__ == '__main__': + main () diff --git a/scripts/aux/find-superfluous-includes.py b/scripts/aux/find-superfluous-includes.py new file mode 100755 index 0000000000..de40df088a --- /dev/null +++ b/scripts/aux/find-superfluous-includes.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +import sys +import re +import os + + +full_paths = {} +incs = {} +inc_re = re.compile ('^#include "([^"]+)"') +def parse_file (fn): + lst = [] + + lc = 0 + for l in open (fn).readlines(): + lc += 1 + m = inc_re.search (l) + if m: + lst.append ((lc, m.group (1))) + + base = os.path.split (fn)[1] + full_paths[base] = fn + incs[base] = lst + + +def has_include (f, name): + try: + return name in [b for (a,b) in incs[f]] + except KeyError: + return False + +for a in sys.argv: + parse_file (a) + +print '-*-compilation-*-' +for (f, lst) in incs.items (): + for (n, inc) in lst: + for (n2, inc2) in lst: + if has_include (inc2, inc): + print "%s:%d: already have %s from %s" % (full_paths[f], n, + inc, inc2) + break + + + diff --git a/scripts/aux/fixcc.py b/scripts/aux/fixcc.py new file mode 100755 index 0000000000..167994cdab --- /dev/null +++ b/scripts/aux/fixcc.py @@ -0,0 +1,625 @@ +#!/usr/bin/env python + +# fixcc -- nitpick lily's c++ code + +# TODO +# * maintainable rules: regexp's using whitespace (?x) and match names +# ) +# * trailing `*' vs. function definition +# * do not break/change indentation of fixcc-clean files +# * check lexer, parser +# * rewrite in elisp, add to cc-mode +# * using regexes is broken by design +# * ? 
+# * profit + +import __main__ +import getopt +import os +import re +import string +import sys +import time + +COMMENT = 'COMMENT' +STRING = 'STRING' +GLOBAL_CXX = 'GC++' +CXX = 'C++' +verbose_p = 0 +indent_p = 0 + +rules = { + GLOBAL_CXX: + [ + # delete gratuitous block + ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''', + '\n\\2;'), + ], + CXX: + [ + # space before parenthesis open + ('([^\( \]])[ \t]*\(', '\\1 ('), + # space after comma + ("\([^'],\)[ \t]*", '\1 '), + # delete gratuitous block + ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''', + '\n\\2;'), + # delete inline tabs + ('(\w)\t+', '\\1 '), + # delete inline double spaces + (' *', ' '), + # delete space after parenthesis open + ('\([ \t]*', '('), + # delete space before parenthesis close + ('[ \t]*\)', ')'), + # delete spaces after prefix + ('(--|\+\+)[ \t]*([\w\)])', '\\1\\2'), + # delete spaces before postfix + ('([\w\)\]])[ \t]*(--|\+\+)', '\\1\\2'), + # delete space after parenthesis close + #('\)[ \t]*([^\w])', ')\\1'), + # delete space around operator + # ('([\w\(\)\]])([ \t]*)(::|\.)([ \t]*)([\w\(\)])', '\\1\\3\\5'), + ('([\w\(\)\]])([ \t]*)(\.|->)([ \t]*)([\w\(\)])', '\\1\\3\\5'), + # delete space after operator + ('(::)([ \t]*)([\w\(\)])', '\\1\\3'), + # delete superflous space around operator + ('([\w\(\)\]])([ \t]+)(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&|\||\*)([ \t]+)([\w\(\)])', '\\1 \\3 \\5'), + # space around operator1 + ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'), + # space around operator2 + ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) ([^\w\s])', '\\1 \\2 \\3'), + # space around operator3 + ('([^\w\s]) (&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|[^-]>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'), + # space around operator4 + ('([\w\(\)\]]) (\*|/|\+|-) *([-:])', '\\1 \\2 \\3'), + # space around +/-; exponent + 
('([\w\)\]])(\+|-)([_A-Za-z\(])', '\\1 \\2 \\3'), + ('([_\dA-Za-df-z\)\]])(\+|-)([\w\(])', '\\1 \\2 \\3'), + # trailing operator + (' (::|&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&XXX|\||\*XXX)[ \t]*\n([ \t]*)', '\n\\2\\1 '), + # pointer + ##('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'), + ('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|:|=|\?\)|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'), + #to#('(bool|char|const|delete|int|stream|unsigned|void|([A-Z]\w*)|[,])[ \n\t]*(\*|&)[ \t]*', '\\1 \\3'), + # pointer with template + ('(( *((bool|char|const|delete|int|stream|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*|\w+::\w+|[,])[ \*&],*)+)>) *(\*|&) *', '\\1 \\5'), + #to#('(( *((bool|char|delete|int|stream|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)|[,])[ \*&],*)+)>)[ \t\n]*(\*|&) *', '\\1 \\7'), + # unary pointer, minus, not + ('(return|=) (\*|&|-|!) ([\w\(])', '\\1 \\2\\3'), + # space after `operator' + ('(\Woperator) *([^\w\s])', '\\1 \\2'), + # dangling brace close + ('\n[ \t]*(\n[ \t]*})', '\\1'), + # dangling newline + ('\n[ \t]*\n[ \t]*\n', '\n\n'), + # dangling parenthesis open + #('[ \t]*\n[ \t]*\([ \t]*\n', '('), + ('\([ \t]*\n', '('), + # dangling parenthesis close + ('\n[ \t]*\)', ')'), + # dangling comma + ('\n[ \t]*,', ','), + # dangling semicolon + ('\n[ \t]*;', ';'), + # brace open + ('(\w)[ \t]*([^\s]*){([ \t]*\n)', '\\1\\2\n{\n'), + # brace open backslash + ('(\w[^\n]*){[ \t]*\\\\\n', '\\1\\\n{\\\n'), + # brace close + ("}[ \t]*([^'\n]*\w[^\n\\\]*)\n", '}\n\\1\n'), + # brace close backslash + ("}[ \t]*([^'\n]*\w[^\n\\\]*)", '\n}\n\\1'), + # delete space after `operator' + #('(\Woperator) (\W)', '\\1\\2'), + # delete space after case, label + ('(\W(case|label) ([\w]+)) :', '\\1:'), + # delete space before comma + ('[ \t]*,', ','), + # delete space before semicolon + ('[ \t]*;', ';'), + # delete space before 
eol-backslash + ('[ \t]*\\\\\n', '\\\n'), + # delete trailing whitespace + ('[ \t]*\n', '\n'), + + ## Deuglify code that also gets ugly by rules above. + # delete newline after typedef struct + ('(typedef struct\s+([\w]*\s){([^}]|{[^}]*})*})\s*\n\s*(\w[\w\d]*;)', '\\1 \\4'), + # delete spaces around template brackets + #('(dynamic_cast|template|([A-Z]\w*))[ \t]*<[ \t]*(( *(bool|char|int|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)),?)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\3\\8>'), + ('(dynamic_cast|template|typedef|\w+::\w+|[A-Z]\w*)[ \t]*<[ \t]*(( *(bool|char|const|int|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*)( *[\*&]?,|[\*&])*)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\2\\6>'), + ('(\w+::\w+|[A-Z]\w*) < ((\w+::\w+|[A-Z]\w*)<[A-Z]\w*>) >', '\\1<\\2 >'), + ('((if|while)\s+\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\n;'), + ('(for\s+\(([^;]*;[^;]*;([^\)]|\([^\)]*\))*)\))\s*;', '\\1\n;'), + # do {..} while + ('(}\s*while\s*)(\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\\2;'), + + ## Fix code that gets broken by rules above. + ##('->\s+\*', '->*'), + # delete space before #define x() + ('#[ \t]*define (\w*)[ \t]*\(', '#define \\1('), + # add space in #define x () + ('#[ \t]*define (\w*)(\(([^\(\)]|\([^\(\)]*\))*\)\\n)', + '#define \\1 \\2'), + # delete space in #include <> + ('#[ \t]*include[ \t]*<[ \t]*([^ \t>]*)[ \t]*(/?)[ \t]*([^ \t>]*)[ \t]*>', + '#include <\\1\\2\\3>'), + # delete backslash before empty line (emacs' indent region is broken) + ('\\\\\n\n', '\n\n'), + ], + + COMMENT: + [ + # delete trailing whitespace + ('[ \t]*\n', '\n'), + # delete empty first lines + ('(/\*\n)\n*', '\\1'), + # delete empty last lines + ('\n*(\n\*/)', '\\1'), + ## delete newline after start? + #('/(\*)\n', '\\1'), + ## delete newline before end? + #('\n(\*/)', '\\1'), + ], + } + +# Recognize special sequences in the input. +# +# (?Pregex) -- Assign result of REGEX to NAME. +# *? -- Match non-greedily. +# (?m) -- Multiline regex: Make ^ and $ match at each line. 
+# (?s) -- Make the dot match all characters including newline. +# (?x) -- Ignore whitespace in patterns. +no_match = 'a\ba' +snippet_res = { + CXX: { + 'multiline_comment': + r'''(?sx) + (?P + (?P + [ \t]*/\*.*?\*/))''', + + 'singleline_comment': + r'''(?mx) + ^.* + (?P + (?P + [ \t]*//([ \t][^\n]*|)\n))''', + + 'string': + r'''(?x) + (?P + (?P + "([^\"\n](\")*)*"))''', + + 'char': + r'''(?x) + (?P + (?P + '([^']+|\')))''', + + 'include': + r'''(?x) + (?P + (?P + "#[ \t]*include[ \t]*<[^>]*>''', + }, + } + +class Chunk: + def replacement_text (self): + return '' + + def filter_text (self): + return self.replacement_text () + +class Substring (Chunk): + def __init__ (self, source, start, end): + self.source = source + self.start = start + self.end = end + + def replacement_text (self): + s = self.source[self.start:self.end] + if verbose_p: + sys.stderr.write ('CXX Rules') + for i in rules[CXX]: + if verbose_p: + sys.stderr.write ('.') + #sys.stderr.write ('\n\n***********\n') + #sys.stderr.write (i[0]) + #sys.stderr.write ('\n***********\n') + #sys.stderr.write ('\n=========>>\n') + #sys.stderr.write (s) + #sys.stderr.write ('\n<<=========\n') + s = re.sub (i[0], i[1], s) + if verbose_p: + sys.stderr.write ('done\n') + return s + + +class Snippet (Chunk): + def __init__ (self, type, match, format): + self.type = type + self.match = match + self.hash = 0 + self.options = [] + self.format = format + + def replacement_text (self): + return self.match.group ('match') + + def substring (self, s): + return self.match.group (s) + + def __repr__ (self): + return `self.__class__` + ' type = ' + self.type + +class Multiline_comment (Snippet): + def __init__ (self, source, match, format): + self.type = type + self.match = match + self.hash = 0 + self.options = [] + self.format = format + + def replacement_text (self): + s = self.match.group ('match') + if verbose_p: + sys.stderr.write ('COMMENT Rules') + for i in rules[COMMENT]: + if verbose_p: + sys.stderr.write ('.') + s = 
re.sub (i[0], i[1], s) + return s + +snippet_type_to_class = { + 'multiline_comment': Multiline_comment, +# 'string': Multiline_comment, +# 'include': Include_snippet, +} + +def find_toplevel_snippets (s, types): + if verbose_p: + sys.stderr.write ('Dissecting') + + res = {} + for i in types: + res[i] = re.compile (snippet_res[format][i]) + + snippets = [] + index = 0 + ## found = dict (map (lambda x: (x, None), + ## types)) + ## urg python2.1 + found = {} + map (lambda x, f = found: f.setdefault (x, None), + types) + + # We want to search for multiple regexes, without searching + # the string multiple times for one regex. + # Hence, we use earlier results to limit the string portion + # where we search. + # Since every part of the string is traversed at most once for + # every type of snippet, this is linear. + + while 1: + if verbose_p: + sys.stderr.write ('.') + first = None + endex = 1 << 30 + for type in types: + if not found[type] or found[type][0] < index: + found[type] = None + m = res[type].search (s[index:endex]) + if not m: + continue + + cl = Snippet + if snippet_type_to_class.has_key (type): + cl = snippet_type_to_class[type] + snip = cl (type, m, format) + start = index + m.start ('match') + found[type] = (start, snip) + + if found[type] \ + and (not first \ + or found[type][0] < found[first][0]): + first = type + + # FIXME. + + # Limiting the search space is a cute + # idea, but this *requires* to search + # for possible containing blocks + # first, at least as long as we do not + # search for the start of blocks, but + # always/directly for the entire + # @block ... @end block. 
+ + endex = found[first][0] + + if not first: + snippets.append (Substring (s, index, len (s))) + break + + (start, snip) = found[first] + snippets.append (Substring (s, index, start)) + snippets.append (snip) + found[first] = None + index = start + len (snip.match.group ('match')) + + return snippets + +def nitpick_file (outdir, file): + s = open (file).read () + + for i in rules[GLOBAL_CXX]: + s = re.sub (i[0], i[1], s) + + # FIXME: Containing blocks must be first, see + # find_toplevel_snippets. + # We leave simple strings be part of the code + snippet_types = ( + 'multiline_comment', + 'singleline_comment', + 'string', +# 'char', + ) + + chunks = find_toplevel_snippets (s, snippet_types) + #code = filter (lambda x: is_derived_class (x.__class__, Substring), + # chunks) + + t = string.join (map (lambda x: x.filter_text (), chunks), '') + fixt = file + if s != t: + if not outdir: + os.system ('mv %s %s~' % (file, file)) + else: + fixt = os.path.join (outdir, + os.path.basename (file)) + h = open (fixt, "w") + h.write (t) + h.close () + if s != t or indent_p: + indent_file (fixt) + +def indent_file (file): + emacs = '''emacs\ + --no-window-system\ + --batch\ + --no-site-file\ + --no-init-file\ + %(file)s\ + --eval '(let ((error nil) + (version-control nil)) + (load-library "cc-mode") + (c++-mode) + (indent-region (point-min) (point-max)) + (if (buffer-modified-p (current-buffer)) + (save-buffer)))' ''' % vars () + emacsclient = '''emacsclient\ + --socket-name=%(socketdir)s/%(socketname)s\ + --no-wait\ + --eval '(let ((error nil) + (version-control nil)) + (load-library "cc-mode") + (find-file "%(file)s") + (c++-mode) + (indent-region (point-min) (point-max)) + (if (buffer-modified-p (current-buffer)) + (save-buffer)))' ''' \ + % { 'file': file, + 'socketdir' : socketdir, + 'socketname' : socketname, } + if verbose_p: + sys.stderr.write (emacs) + sys.stderr.write ('\n') + os.system (emacs) + + +def usage (): + sys.stdout.write (r''' +Usage: +fixcc [OPTION]... 
FILE... + +Options: + --help + --indent reindent, even if no changes + --verbose + --test + +Typical use with LilyPond: + + fixcc $(find flower kpath-guile lily -name '*cc' -o -name '*hh' | grep -v /out) + +This script is licensed under the GNU GPL +''') + +def do_options (): + global indent_p, outdir, verbose_p + (options, files) = getopt.getopt (sys.argv[1:], '', + ['help', 'indent', 'outdir=', + 'test', 'verbose']) + for (o, a) in options: + if o == '--help': + usage () + sys.exit (0) + elif o == '--indent': + indent_p = 1 + elif o == '--outdir': + outdir = a + elif o == '--verbose': + verbose_p = 1 + elif o == '--test': + test () + sys.exit (0) + else: + assert unimplemented + if not files: + usage () + sys.exit (2) + return files + + +outdir = 0 +format = CXX +socketdir = '/tmp/fixcc' +socketname = 'fixcc%d' % os.getpid () + +def setup_client (): + #--no-window-system\ + #--batch\ + os.unlink (os.path.join (socketdir, socketname)) + os.mkdir (socketdir, 0700) + emacs='''emacs\ + --no-site-file\ + --no-init-file\ + --eval '(let ((error nil) + (version-control nil)) + (load-library "server") + (setq server-socket-dir "%(socketdir)s") + (setq server-name "%(socketname)s") + (server-start) + (while t) (sleep 1000))' ''' \ + % { 'socketdir' : socketdir, + 'socketname' : socketname, } + + if not os.fork (): + os.system (emacs) + sys.exit (0) + while not os.path.exists (os.path.join (socketdir, socketname)): + time.sleep (1) + +def main (): + #emacsclient should be faster, but this does not work yet + #setup_client () + files = do_options () + if outdir and not os.path.isdir (outdir): + os.makedirs (outdir) + for i in files: + sys.stderr.write ('%s...\n' % i) + nitpick_file (outdir, i) + + +## TODO: make this compilable and check with g++ +TEST = ''' +#include +#include +class +ostream ; + +class Foo { +public: static char* foo (); +std::map* bar (char, char) { return 0; } +}; +typedef struct +{ + Foo **bar; +} String; + +ostream & +operator << (ostream & os, String 
d); + +typedef struct _t_ligature +{ + char *succ, *lig; + struct _t_ligature * next; +} AFM_Ligature; + +typedef std::map < AFM_Ligature const *, int > Bar; + + /** + (c) 1997--2008 Han-Wen Nienhuys + */ + +/* || +* vv +* !OK OK +*/ +/* || + vv + !OK OK +*/ +char * +Foo:: foo () +{ +int +i +; + char* a= &++ i ; + a [*++ a] = (char*) foe (*i, &bar) * + 2; + int operator double (); + std::map y =*bar(-*a ,*b); + Interval_t & operator*= (T r); + Foo*c; + int compare (Pqueue_ent < K, T > const& e1, Pqueue_ent < K,T> *e2); + delete *p; + if (abs (f)*2 > abs (d) *FUDGE) + ; + while (0); + for (; ibar); + for (; *p && > y; + foo > bar) +; + do { + ;;; + } + while (foe); + + squiggle. extent; + 1 && * unsmob_moment (lf); + line_spanner_ = make_spanner ("DynamicLineSpanner", rq ? rq->*self_scm +(): SCM_EOL); + case foo: k; + + if (0) {a=b;} else { + c=d; + } + + cookie_io_functions_t Memory_out_stream::functions_ = { + Memory_out_stream::reader, + ... + }; + + int compare (Array < Pitch> *, Array < Pitch> *); + original_ = (Grob *) & s; + Drul_array< Link_array > o; +} + + header_.char_info_pos = (6 + header_length) * 4; + return ly_bool2scm (*ma < * mb); + + 1 *::sign(2); + + (shift) *-d; + + a = 0 ? 
*x : *y; + +a = "foo() 2,2,4"; +{ + if (!span_) + { + span_ = make_spanner ("StaffSymbol", SCM_EOL); + } +} +{ + if (!span_) + { + span_ = make_spanner (StaffSymbol, SCM_EOL); + } +} +''' + +def test (): + test_file = 'fixcc.cc' + open (test_file, 'w').write (TEST) + nitpick_file (outdir, test_file) + sys.stdout.write (open (test_file).read ()) + +if __name__ == '__main__': + main () + diff --git a/scripts/aux/makelsr.py b/scripts/aux/makelsr.py new file mode 100755 index 0000000000..cb0619f6de --- /dev/null +++ b/scripts/aux/makelsr.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python + +import sys +import os +import glob +import re + +USAGE = ''' Usage: makelsr.py LSR_SNIPPETS_DIR +This script must be run from top of the source tree; +it updates snippets input/lsr with snippets in input/new or LSR_SNIPPETS_DIR. +''' + +LY_HEADER_LSR = '''%% Do not edit this file; it is auto-generated from LSR http://lsr.dsi.unimi.it +%% This file is in the public domain. +''' + +LY_HEADER_NEW = '''%% Do not edit this file; it is auto-generated from input/new +%% This file is in the public domain. 
+''' + +DEST = os.path.join ('input', 'lsr') +NEW_LYS = os.path.join ('input', 'new') +TEXIDOCS = os.path.join ('input', 'texidocs') + +TAGS = [] +# NR 1 +TAGS.extend (['pitches', 'rhythms', 'expressive-marks', +'repeats', 'simultaneous-notes', 'staff-notation', +'editorial-annotations', 'text']) +# NR 2 +TAGS.extend (['vocal-music', 'chords', 'keyboards', +'percussion', 'fretted-strings', 'unfretted-strings', +'ancient-notation', 'winds', 'world-music' +]) + +# other +TAGS.extend (['contexts-and-engravers', 'tweaks-and-overrides', +'paper-and-layout', 'breaks', 'spacing', 'midi', 'titles', 'template']) + +def exit_with_usage (n=0): + sys.stderr.write (USAGE) + sys.exit (n) + +try: + in_dir = sys.argv[1] +except: + exit_with_usage (2) + +if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)): + exit_with_usage (3) + +unsafe = [] +unconverted = [] +notags_files = [] + +# mark the section that will be printed verbatim by lilypond-book +end_header_re = re.compile ('(\\header {.+?doctitle = ".+?})\n', re.M | re.S) + +def mark_verbatim_section (ly_code): + return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1) + +# '% LSR' comments are to be stripped +lsr_comment_re = re.compile (r'\s*%+\s*LSR.*') + +begin_header_re = re.compile (r'\\header\s*{', re.M) + +# add tags to ly files from LSR +def add_tags (ly_code, tags): + return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n', ly_code, 1) + +def copy_ly (srcdir, name, tags): + global unsafe + global unconverted + dest = os.path.join (DEST, name) + tags = ', '.join (tags) + s = open (os.path.join (srcdir, name)).read () + + texidoc_translations_path = os.path.join (TEXIDOCS, + os.path.splitext (name)[0] + '.texidoc') + if os.path.exists (texidoc_translations_path): + texidoc_translations = open (texidoc_translations_path).read () + # Since we want to insert the translations verbatim using a + # regexp, \\ is understood as ONE escaped backslash. So we have + # to escape those backslashes once more... 
+ texidoc_translations = texidoc_translations.replace ('\\', '\\\\') + s = begin_header_re.sub ('\\g<0>\n' + texidoc_translations, s, 1) + + if in_dir in srcdir: + s = LY_HEADER_LSR + add_tags (s, tags) + else: + s = LY_HEADER_NEW + s + + s = mark_verbatim_section (s) + s = lsr_comment_re.sub ('', s) + open (dest, 'w').write (s) + + e = os.system ("convert-ly -e '%s'" % dest) + if e: + unconverted.append (dest) + if os.path.exists (dest + '~'): + os.remove (dest + '~') + # -V seems to make unsafe snippets fail nicer/sooner + e = os.system ("lilypond -V -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" % dest) + if e: + unsafe.append (dest) + +def read_source_with_dirs (src): + s = {} + l = {} + for tag in TAGS: + srcdir = os.path.join (src, tag) + l[tag] = set (map (os.path.basename, glob.glob (os.path.join (srcdir, '*.ly')))) + for f in l[tag]: + if f in s: + s[f][1].append (tag) + else: + s[f] = (srcdir, [tag]) + return s, l + + +tags_re = re.compile ('lsrtags\\s*=\\s*"(.+?)"') + +def read_source (src): + s = {} + l = dict ([(tag, set()) for tag in TAGS]) + for f in glob.glob (os.path.join (src, '*.ly')): + basename = os.path.basename (f) + m = tags_re.search (open (f, 'r').read ()) + if m: + file_tags = [tag.strip() for tag in m.group (1). 
split(',')] + s[basename] = (src, file_tags) + [l[tag].add (basename) for tag in file_tags if tag in TAGS] + else: + notags_files.append (f) + return s, l + + +def dump_file_list (file, list): + f = open (file, 'w') + f.write ('\n'.join (list) + '\n') + +## clean out existing lys and generated files +map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) + + glob.glob (os.path.join (DEST, '*.snippet-list'))) + +# read LSR source where tags are defined by subdirs +snippets, tag_lists = read_source_with_dirs (in_dir) +# read input/new where tags are directly +s, l = read_source (NEW_LYS) +snippets.update (s) +for t in TAGS: + tag_lists[t].update (l[t]) + +for (name, (srcdir, tags)) in snippets.items (): + copy_ly (srcdir, name, tags) + +for (tag, file_set) in tag_lists.items (): + dump_file_list (os.path.join (DEST, tag + '.snippet-list'), sorted(file_set)) + +if unconverted: + sys.stderr.write ('These files could not be converted successfully by convert-ly:\n') + sys.stderr.write ('\n'.join (unconverted) + '\n\n') + +if notags_files: + sys.stderr.write ('No tags could be found in these files:\n') + sys.stderr.write ('\n'.join (notags_files) + '\n\n') + +dump_file_list ('lsr-unsafe.txt', unsafe) +sys.stderr.write (''' + +Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY! 
+ git add input/lsr/*.ly + xargs git-diff HEAD < lsr-unsafe.txt + +''') + diff --git a/scripts/aux/musicxml_generate_intervals.py b/scripts/aux/musicxml_generate_intervals.py new file mode 100755 index 0000000000..3c00715d14 --- /dev/null +++ b/scripts/aux/musicxml_generate_intervals.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +notes = "CDEFGAB" +alterations = [-1, 0, 1] + +def print_note (octave, note, alteration): + print " \n \n %s" % notes[note] + if alteration <> 0: + print " %s" % alteration + print " %s\n \n 1\n 1\n quarter\n " % octave + + +print """ + + + Various piches and interval sizes + + + MusicXML Part + + + + + + + 1 + + 0 + major + + + + G + 2 + + +""" + +start_octave = 5 + +for octave in (start_octave, start_octave+1): + for note in (0,1,2,3,4,5,6): + for alteration in alterations: + if octave == start_octave and note == 0 and alteration == -1: + continue + print_note (octave, note, alteration) +# if octave == start_octave and note == 0 and alteration == 0: +# continue + print_note (start_octave-(octave-start_octave)-(1-(7-note)/7), (7-note)%7, -alteration) + +print """ + + +""" diff --git a/scripts/aux/musicxml_generate_keys.py b/scripts/aux/musicxml_generate_keys.py new file mode 100755 index 0000000000..7a16ac987f --- /dev/null +++ b/scripts/aux/musicxml_generate_keys.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python + +notes = "CDEFGAB" +alterations = [-1, 0, 1] + +def print_measure (nr, fifth, mode, atts1 = "", atts = "", final = ""): + print """ + +%s + %s + %s + +%s + + + C + 4 + + 2 + 1 + half + +%s """ % (nr, atts1, fifth, mode, atts, final) + +first_div = """ 1 +""" +first_atts = """ + + G + 2 + +""" + +final_barline = """ + light-heavy + +""" + +print """ + + + Different Key signatures + + + Various key signature: from 11 + flats to 11 sharps (each one first one measure in major, then one + measure in minor) + + + + + MusicXML Part + + + + """ + +max_range = 11 +measure = 0 +for fifth in range(-max_range, max_range+1): + measure += 1 + if 
fifth == -max_range: + print_measure (measure, fifth, "major", first_div, first_atts) + else: + print_measure (measure, fifth, "major") + measure += 1 + if fifth == max_range: + print_measure (measure, fifth, "minor", "", "", final_barline) + else: + print_measure (measure, fifth, "minor") + + +print """ +""" diff --git a/scripts/aux/musicxml_generate_timesignatures.py b/scripts/aux/musicxml_generate_timesignatures.py new file mode 100755 index 0000000000..c4cc78a103 --- /dev/null +++ b/scripts/aux/musicxml_generate_timesignatures.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python + +notes = "CDEFGAB" +alterations = [-1, 0, 1] + +dot_xml = """ +""" +tie_xml = """ +""" +tie_notation_xml = """ +""" + + +def generate_note (duration, end_tie = False): + if duration < 2: + (notetype, dur) = ("8th", 1) + elif duration < 4: + (notetype, dur) = ("quarter", 2) + elif duration < 8: + (notetype, dur) = ("half", 4) + else: + (notetype, dur) = ("whole", 8) + dur_processed = dur + dot = "" + if (duration - dur_processed >= dur/2): + dot = dot_xml + dur_processed += dur/2 + if (duration - dur_processed >= max(dur/4, 1)): + dot += dot_xml + dur_processed += dur/4 + tie = "" + tie_notation = "" + if end_tie: + tie += tie_xml % "stop" + tie_notation += tie_notation_xml % "stop" + second_note = None + if duration - dur_processed > 0: + second_note = generate_note (duration-dur_processed, True) + tie += tie_xml % "start" + tie_notation += tie_notation_xml % "start" + note = """ + + C + 5 + + %s +%s 1 + %s +%s%s """ % (dur_processed, tie, notetype, dot, tie_notation) + if second_note: + return "%s\n%s" % (note, second_note) + else: + return note + +def print_measure (nr, beats, type, params = "", attr = "", attr2 = "", barline = ""): + duration = 8*beats/type + note = generate_note (duration) + + print """ + +%s + %s + %s + +%s +%s +%s """ % (nr, attr, params, beats, type, attr2, note, barline) + +first_key = """ 2 + + 0 + major + +""" +first_clef = """ + G + 2 + +""" + +final_barline = """ + 
light-heavy + +""" + +print """ + + + + + Various time signatures: 2/2 + (alla breve), 4/4 (C), 2/2, 3/2, 2/4, 3/4, 4/4, 5/4, 3/8, 6/8, + 12/8 + + + + + MusicXML Part + + + + """ + +measure = 1 + +print_measure (measure, 2, 2, " symbol=\"common\"", first_key, first_clef) +measure += 1 + +print_measure (measure, 4, 4, " symbol=\"common\"") +measure += 1 + +print_measure (measure, 2, 2) +measure += 1 + +print_measure (measure, 3, 2) +measure += 1 + +print_measure (measure, 2, 4) +measure += 1 + +print_measure (measure, 3, 4) +measure += 1 + +print_measure (measure, 4, 4) +measure += 1 + +print_measure (measure, 5, 4) +measure += 1 + +print_measure (measure, 3, 8) +measure += 1 + +print_measure (measure, 6, 8) +measure += 1 + +print_measure (measure, 12, 8, "", "", "", final_barline) +measure += 1 + +print """ +""" diff --git a/scripts/aux/pfx2ttf.fontforge b/scripts/aux/pfx2ttf.fontforge new file mode 100644 index 0000000000..7d87fae17a --- /dev/null +++ b/scripts/aux/pfx2ttf.fontforge @@ -0,0 +1,29 @@ +Open($1); +MergeKern($2) + + +# The AFM files of `New Century Schoolbook' family as distributed within the +# urw-fonts-1.0.7pre41.tar.bz2 archive contain a bunch of ligatures which +# shouldn't be active by default: +# +# T + M -> trademark +# N + o -> afii61352 +# i + j -> ij +# I + J -> IJ +# +# This font bundle is shipped by Fedora Core 6 and other GNU/Linux +# distributions; we simply remove those ligatures. 
+ +SelectIf("trademark", "trademark", \ + "afii61352", "afii61352", \ + "ij", "ij", \ + "IJ", "IJ"); +if (Strtol($version) < 20070501) + RemoveATT("Ligature", "*", "*"); +else + RemovePosSub("*"); +endif + +Generate($3 + $fontname + ".otf"); + +# EOF diff --git a/scripts/aux/readlink.py b/scripts/aux/readlink.py new file mode 100755 index 0000000000..70267ffa59 --- /dev/null +++ b/scripts/aux/readlink.py @@ -0,0 +1,6 @@ +#!/usr/bin/env python +import os +import sys + +for i in sys.argv[1:]: + print os.path.realpath (i) diff --git a/scripts/aux/tely-gettext.py b/scripts/aux/tely-gettext.py new file mode 100755 index 0000000000..12f80aa8dc --- /dev/null +++ b/scripts/aux/tely-gettext.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Temporary script that helps translated docs sources conversion +# for texi2html processing + +# USAGE: tely-gettext.py PYTHON-DIR LOCALEDIR LANG FILES + +print "tely-gettext.py" + +import sys +import re +import os +import gettext + +if len (sys.argv) > 3: + buildscript_dir, localedir, lang = sys.argv[1:4] +else: + print """USAGE: tely-gettext.py PYTHON-DIR LOCALEDIR LANG FILES + For example scripts/aux/tely-gettext.py python/out Documentation/po/out-www de Documentation/de/user/*.tely""" + sys.exit (1) + +sys.path.append (buildscript_dir) +import langdefs + +double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep +t = gettext.translation('lilypond-doc', localedir, [lang]) +_doc = t.gettext + +include_re = re.compile (r'@include (.*?)$', re.M) +whitespaces = re.compile (r'\s+') +ref_re = re.compile (r'(?ms)@(ruser|rprogram|ref|rlearning)\{(.*?)\}') +node_section_re = re.compile (r'@node (.*?)\n@((?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) (.*?)\n') +menu_entry_re = re.compile (r'\* (.*?)::') + +def ref_gettext (m): + r = whitespaces.sub (' ', m.group (2)) + return '@' + m.group (1) + '{' + _doc (r) + '}' + +def node_gettext (m): + 
return '@node ' + _doc (m.group (1)) + '\n@' + \ + m.group (2) + ' ' + _doc (m.group (3)) + \ + '\n@translationof ' + m.group (1) + '\n' + +def menu_entry_gettext (m): + return '* ' + _doc (m.group (1)) + '::' + +def process_file (filename): + print "Processing %s" % filename + f = open (filename, 'r') + page = f.read () + f.close() + page = node_section_re.sub (node_gettext, page) + page = ref_re.sub (ref_gettext, page) + page = menu_entry_re.sub (menu_entry_gettext, page) + page = page.replace ("""-- SKELETON FILE -- +When you actually translate this file, please remove these lines as +well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", """@c -- SKELETON FILE --""") + page = page.replace ('UNTRANSLATED NODE: IGNORE ME', "@c UNTRANSLATED NODE: IGNORE ME") + includes = [whitespaces.sub ('', f) for f in include_re.findall (page)] + f = open (filename, 'w') + f.write (page) + f.close () + dir = os.path.dirname (filename) + for file in includes: + p = os.path.join (dir, file) + if os.path.exists (p): + process_file (p) + +for filename in sys.argv[4:]: + process_file (filename) diff --git a/scripts/aux/texi-langutils.py b/scripts/aux/texi-langutils.py new file mode 100755 index 0000000000..7c34ce4adb --- /dev/null +++ b/scripts/aux/texi-langutils.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python +# texi-langutils.py + +# WARNING: this script can't find files included in a different directory + +import sys +import re +import getopt +import os + +import langdefs + +def read_pipe (command): + print command + pipe = os.popen (command) + output = pipe.read () + if pipe.close (): + print "pipe failed: %(command)s" % locals () + return output + + +optlist, texi_files = getopt.getopt(sys.argv[1:],'no:d:b:i:l:',['skeleton', 'gettext']) +process_includes = not ('-n', '') in optlist # -n don't process @include's in texinfo files + +make_gettext = ('--gettext', '') in optlist # --gettext generate a node list from a Texinfo source +make_skeleton = ('--skeleton', '') in optlist # 
--skeleton extract the node tree from a Texinfo source + +output_file = 'doc.pot' + +# @untranslated should be defined as a macro in Texinfo source +node_blurb = '''@untranslated +''' +doclang = '' +head_committish = read_pipe ('git-rev-parse HEAD') +intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*- +@c This file is part of %(topfile)s +@ignore + Translation of GIT committish: %(head_committish)s + When revising a translation, copy the HEAD committish of the + version that you are working on. See TRANSLATION for details. +@end ignore +''' + +end_blurb = """ +@c -- SKELETON FILE -- +""" + +for x in optlist: + if x[0] == '-o': # -o NAME set PO output file name to NAME + output_file = x[1] + elif x[0] == '-d': # -d DIR set working directory to DIR + os.chdir (x[1]) + elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB + node_blurb = x[1] + elif x[0] == '-i': # -i BLURB set blurb written at beginning of each file to BLURB + intro_blurb = x[1] + elif x[0] == '-l': # -l ISOLANG set documentlanguage to ISOLANG + doclang = '; documentlanguage: ' + x[1] + +texinfo_with_menus_re = re.compile (r"^(\*) +([^:\n]+)::.*?$|^@(include|menu|end menu|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.*?)$|@(rglos){(.+?)}", re.M) + +texinfo_re = re.compile (r"^@(include|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.+?)$|@(rglos){(.+?)}", re.M) + +ly_string_re = re.compile (r'^([a-zA-Z]+)[\t ]*=|%+[\t ]*(.*)$|\\(?:new|context)\s+(?:[a-zA-Z]*?(?:Staff(?:Group)?|Voice|FiguredBass|FretBoards|Names|Devnull))\s+=\s+"?([a-zA-Z]+)"?\s+') +lsr_verbatim_ly_re = re.compile (r'% begin verbatim$') +texinfo_verbatim_ly_re = re.compile (r'^@lilypond\[.*?verbatim') + +def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile, output_file=None, scan_ly=False): + try: + f = open (texifilename, 'r') + texifile = 
f.read () + f.close () + printedfilename = texifilename.replace ('../','') + includes = [] + + # process ly var names and comments + if output_file and (scan_ly or texifilename.endswith ('.ly')): + lines = texifile.splitlines () + i = 0 + in_verb_ly_block = False + if texifilename.endswith ('.ly'): + verbatim_ly_re = lsr_verbatim_ly_re + else: + verbatim_ly_re = texinfo_verbatim_ly_re + for i in range (len (lines)): + if verbatim_ly_re.search (lines[i]): + in_verb_ly_block = True + elif lines[i].startswith ('@end lilypond'): + in_verb_ly_block = False + elif in_verb_ly_block: + for (var, comment, context_id) in ly_string_re.findall (lines[i]): + if var: + output_file.write ('# ' + printedfilename + ':' + \ + str (i + 1) + ' (variable)\n_(r"' + var + '")\n') + elif comment: + output_file.write ('# ' + printedfilename + ':' + \ + str (i + 1) + ' (comment)\n_(r"' + \ + comment.replace ('"', '\\"') + '")\n') + elif context_id: + output_file.write ('# ' + printedfilename + ':' + \ + str (i + 1) + ' (context id)\n_(r"' + \ + context_id + '")\n') + + # process Texinfo node names and section titles + if write_skeleton: + g = open (os.path.basename (texifilename), 'w') + subst = globals () + subst.update (locals ()) + g.write (i_blurb % subst) + tutu = texinfo_with_menus_re.findall (texifile) + node_trigger = False + for item in tutu: + if item[0] == '*': + g.write ('* ' + item[1] + '::\n') + elif output_file and item[4] == 'rglos': + output_file.write ('_(r"' + item[5] + '") # @rglos in ' + printedfilename + '\n') + elif item[2] == 'menu': + g.write ('@menu\n') + elif item[2] == 'end menu': + g.write ('@end menu\n\n') + else: + g.write ('@' + item[2] + ' ' + item[3] + '\n') + if node_trigger: + g.write (n_blurb) + node_trigger = False + elif item[2] == 'include': + includes.append (item[3]) + else: + if output_file: + output_file.write ('# @' + item[2] + ' in ' + \ + printedfilename + '\n_(r"' + item[3].strip () + '")\n') + if item[2] == 'node': + node_trigger = True + 
g.write (end_blurb) + g.close () + + elif output_file: + toto = texinfo_re.findall (texifile) + for item in toto: + if item[0] == 'include': + includes.append(item[1]) + elif item[2] == 'rglos': + output_file.write ('# @rglos in ' + printedfilename + '\n_(r"' + item[3] + '")\n') + else: + output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip () + '")\n') + + if process_includes: + dir = os.path.dirname (texifilename) + for item in includes: + process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb, write_skeleton, topfile, output_file, scan_ly) + except IOError, (errno, strerror): + sys.stderr.write ("I/O error(%s): %s: %s\n" % (errno, texifilename, strerror)) + + +if intro_blurb != '': + intro_blurb += '\n\n' +if node_blurb != '': + node_blurb = '\n' + node_blurb + '\n\n' +if make_gettext: + node_list_filename = 'node_list' + node_list = open (node_list_filename, 'w') + node_list.write ('# -*- coding: utf-8 -*-\n') + for texi_file in texi_files: + # Urgly: scan ly comments and variable names only in English doco + is_english_doc = 'Documentation/user' in texi_file + process_texi (texi_file, intro_blurb, node_blurb, make_skeleton, + os.path.basename (texi_file), node_list, + scan_ly=is_english_doc) + for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'): + node_list.write ('_(r"' + word + '")\n') + node_list.close () + os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename) +else: + for texi_file in texi_files: + process_texi (texi_file, intro_blurb, node_blurb, make_skeleton, + os.path.basename (texi_file)) diff --git a/scripts/aux/texi-skeleton-update.py b/scripts/aux/texi-skeleton-update.py new file mode 100755 index 0000000000..250b52e836 --- /dev/null +++ b/scripts/aux/texi-skeleton-update.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# texi-skeleton-update.py + +import sys +import glob +import os +import shutil + +sys.stderr.write 
('texi-skeleton-update.py\n') + +orig_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[1] + '/*.ite??')]) +new_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[2] + '/*.ite??')]) + +for f in new_skeletons: + if f in orig_skeletons: + g = open (os.path.join (sys.argv[1], f), 'r').read () + if '-- SKELETON FILE --' in g: + sys.stderr.write ("Updating %s...\n" % f) + shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1]) + elif f != 'fdl.itexi': + sys.stderr.write ("Copying new file %s...\n" % f) + shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1]) + +for f in orig_skeletons.difference (new_skeletons): + sys.stderr.write ("Warning: outdated skeleton file %s\n" % f) diff --git a/scripts/aux/translations-status.py b/scripts/aux/translations-status.py new file mode 100755 index 0000000000..c93199354f --- /dev/null +++ b/scripts/aux/translations-status.py @@ -0,0 +1,578 @@ +#!/usr/bin/env python + +""" +USAGE: translations-status.py BUILDSCRIPT-DIR LOCALEDIR + + This script must be run from Documentation/ + + Reads template files translations.template.html.in +and for each LANG in LANGUAGES LANG/translations.template.html.in + Writes translations.html.in and for each LANG in LANGUAGES +translations.LANG.html.in + Writes out/translations-status.txt + Updates word counts in TRANSLATION +""" + +import sys +import re +import string +import os + +import langdefs +import buildlib + +def progress (str): + sys.stderr.write (str + '\n') + +progress ("translations-status.py") + +_doc = lambda s: s + +# load gettext messages catalogs +translation = langdefs.translation + + +language_re = re.compile (r'^@documentlanguage (.+)', re.M) +comments_re = re.compile (r'^@ignore\n(.|\n)*?\n@end ignore$|@c .*?$', re.M) +space_re = re.compile (r'\s+', re.M) +lilypond_re = re.compile (r'@lilypond({.*?}|(.|\n)*?\n@end lilypond$)', re.M) +node_re = re.compile ('^@node .*?$', re.M) +title_re = re.compile ('^@(top|chapter|(?:sub){0,2}section|' + \ 
+'(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?) (.*?)$', re.M) +include_re = re.compile ('^@include (.*?)$', re.M) + +translators_re = re.compile (r'^@c\s+Translators\s*:\s*(.*?)$', re.M | re.I) +checkers_re = re.compile (r'^@c\s+Translation\s*checkers\s*:\s*(.*?)$', + re.M | re.I) +status_re = re.compile (r'^@c\s+Translation\s*status\s*:\s*(.*?)$', re.M | re.I) +post_gdp_re = re.compile ('post.GDP', re.I) +untranslated_node_str = '@untranslated' +skeleton_str = '-- SKELETON FILE --' + +section_titles_string = _doc ('Section titles') +last_updated_string = _doc ('

    Last updated %s

    \n') +detailed_status_heads = [_doc ('Translators'), _doc ('Translation checkers'), + _doc ('Translated'), _doc ('Up to date'), + _doc ('Other info')] +format_table = { + 'not translated': {'color':'d0f0f8', 'short':_doc ('no'), 'abbr':'NT', + 'long':_doc ('not translated')}, + 'partially translated': {'color':'dfef77', + 'short':_doc ('partially (%(p)d %%)'), + 'abbr':'%(p)d%%', + 'long':_doc ('partially translated (%(p)d %%)')}, + 'fully translated': {'color':'1fff1f', 'short':_doc ('yes'), 'abbr':'FT', + 'long': _doc ('translated')}, + 'up to date': {'short':_doc ('yes'), 'long':_doc ('up to date'), + 'abbr':'100%%', 'vague':_doc ('up to date')}, + 'outdated': {'short':_doc ('partially'), 'abbr':'%(p)d%%', + 'vague':_doc ('partially up to date')}, + 'N/A': {'short':_doc ('N/A'), 'abbr':'N/A', 'color':'d587ff', 'vague':''}, + 'pre-GDP':_doc ('pre-GDP'), + 'post-GDP':_doc ('post-GDP') +} + +texi_level = { +# (Unumbered/Numbered/Lettered, level) + 'top': ('u', 0), + 'unnumbered': ('u', 1), + 'unnumberedsec': ('u', 2), + 'unnumberedsubsec': ('u', 3), + 'chapter': ('n', 1), + 'section': ('n', 2), + 'subsection': ('n', 3), + 'appendix': ('l', 1) +} + +appendix_number_trans = string.maketrans ('@ABCDEFGHIJKLMNOPQRSTUVWXY', + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ') + +class SectionNumber (object): + def __init__ (self): + self.__data = [[0,'u']] + + def __increase_last_index (self): + type = self.__data[-1][1] + if type == 'l': + self.__data[-1][0] = \ + self.__data[-1][0].translate (appendix_number_trans) + elif type == 'n': + self.__data[-1][0] += 1 + + def format (self): + if self.__data[-1][1] == 'u': + return '' + return '.'.join ([str (i[0]) for i in self.__data if i[1] != 'u']) + ' ' + + def increase (self, (type, level)): + if level == 0: + self.__data = [[0,'u']] + while level + 1 < len (self.__data): + del self.__data[-1] + if level + 1 > len (self.__data): + self.__data.append ([0, type]) + if type == 'l': + self.__data[-1][0] = '@' + if type == self.__data[-1][1]: 
+ self.__increase_last_index () + else: + self.__data[-1] = ([0, type]) + if type == 'l': + self.__data[-1][0] = 'A' + elif type == 'n': + self.__data[-1][0] = 1 + return self.format () + + +def percentage_color (percent): + p = percent / 100.0 + if p < 0.33: + c = [hex (int (3 * p * b + (1 - 3 * p) * a))[2:] + for (a, b) in [(0xff, 0xff), (0x5c, 0xa6), (0x5c, 0x4c)]] + elif p < 0.67: + c = [hex (int ((3 * p - 1) * b + (2 - 3 * p) * a))[2:] + for (a, b) in [(0xff, 0xff), (0xa6, 0xff), (0x4c, 0x3d)]] + else: + c = [hex (int ((3 * p - 2) * b + 3 * (1 - p) * a))[2:] + for (a, b) in [(0xff, 0x1f), (0xff, 0xff), (0x3d, 0x1f)]] + return ''.join (c) + + +def update_word_count (text, filename, word_count): + return re.sub (r'(?m)^(\d+) *' + filename, + str (word_count).ljust (6) + filename, + text) + +po_msgid_re = re.compile (r'^msgid "(.*?)"(?:\n"(.*?)")*', re.M) + +def po_word_count (po_content): + s = ' '.join ([''.join (t) for t in po_msgid_re.findall (po_content)]) + return len (space_re.split (s)) + +sgml_tag_re = re.compile (r'<.*?>', re.S) + +def sgml_word_count (sgml_doc): + s = sgml_tag_re.sub ('', sgml_doc) + return len (space_re.split (s)) + +def tely_word_count (tely_doc): + ''' + Calculate word count of a Texinfo document node by node. + + Take string tely_doc as an argument. + Return a list of integers. + + Texinfo comments and @lilypond blocks are not included in word counts. 
+ ''' + tely_doc = comments_re.sub ('', tely_doc) + tely_doc = lilypond_re.sub ('', tely_doc) + nodes = node_re.split (tely_doc) + return [len (space_re.split (n)) for n in nodes] + + +class TelyDocument (object): + def __init__ (self, filename): + self.filename = filename + self.contents = open (filename).read () + + ## record title and sectionning level of first Texinfo section + m = title_re.search (self.contents) + if m: + self.title = m.group (2) + self.level = texi_level [m.group (1)] + else: + self.title = 'Untitled' + self.level = ('u', 1) + + m = language_re.search (self.contents) + if m: + self.language = m.group (1) + + included_files = [os.path.join (os.path.dirname (filename), t) + for t in include_re.findall (self.contents)] + self.included_files = [p for p in included_files if os.path.exists (p)] + + def print_title (self, section_number): + return section_number.increase (self.level) + self.title + + +class TranslatedTelyDocument (TelyDocument): + def __init__ (self, filename, masterdocument, parent_translation=None): + TelyDocument.__init__ (self, filename) + + self.masterdocument = masterdocument + if not hasattr (self, 'language') \ + and hasattr (parent_translation, 'language'): + self.language = parent_translation.language + if hasattr (self, 'language'): + self.translation = translation[self.language] + else: + self.translation = lambda x: x + self.title = self.translation (self.title) + + ## record authoring information + m = translators_re.search (self.contents) + if m: + self.translators = [n.strip () for n in m.group (1).split (',')] + else: + self.translators = parent_translation.translators + m = checkers_re.search (self.contents) + if m: + self.checkers = [n.strip () for n in m.group (1).split (',')] + elif isinstance (parent_translation, TranslatedTelyDocument): + self.checkers = parent_translation.checkers + else: + self.checkers = [] + + ## check whether translation is pre- or post-GDP + m = status_re.search (self.contents) + if m: + 
self.post_gdp = bool (post_gdp_re.search (m.group (1))) + else: + self.post_gdp = False + + ## record which parts (nodes) of the file are actually translated + self.partially_translated = not skeleton_str in self.contents + nodes = node_re.split (self.contents) + self.translated_nodes = [not untranslated_node_str in n for n in nodes] + + ## calculate translation percentage + master_total_word_count = sum (masterdocument.word_count) + translation_word_count = \ + sum ([masterdocument.word_count[k] * self.translated_nodes[k] + for k in range (min (len (masterdocument.word_count), + len (self.translated_nodes)))]) + self.translation_percentage = \ + 100 * translation_word_count / master_total_word_count + + ## calculate how much the file is outdated + (diff_string, error) = \ + buildlib.check_translated_doc (masterdocument.filename, self.filename, self.contents) + if error: + sys.stderr.write ('warning: %s: %s' % (self.filename, error)) + self.uptodate_percentage = None + else: + diff = diff_string.splitlines () + insertions = sum ([len (l) - 1 for l in diff + if l.startswith ('+') + and not l.startswith ('+++')]) + deletions = sum ([len (l) - 1 for l in diff + if l.startswith ('-') + and not l.startswith ('---')]) + outdateness_percentage = 50.0 * (deletions + insertions) / \ + (masterdocument.size + 0.5 * (deletions - insertions)) + self.uptodate_percentage = 100 - int (outdateness_percentage) + if self.uptodate_percentage > 100: + alternative = 50 + progress ("%s: strange uptodateness percentage %d %%, \ +setting to %d %%" % (self.filename, self.uptodate_percentage, alternative)) + self.uptodate_percentage = alternative + elif self.uptodate_percentage < 1: + alternative = 1 + progress ("%s: strange uptodateness percentage %d %%, \ +setting to %d %%" % (self.filename, self.uptodate_percentage, alternative)) + self.uptodate_percentage = alternative + + def completeness (self, formats=['long'], translated=False): + if translated: + translation = self.translation + 
else: + translation = lambda x: x + + if isinstance (formats, str): + formats = [formats] + p = self.translation_percentage + if p == 0: + status = 'not translated' + elif p == 100: + status = 'fully translated' + else: + status = 'partially translated' + return dict ([(f, translation (format_table[status][f]) % locals()) + for f in formats]) + + def uptodateness (self, formats=['long'], translated=False): + if translated: + translation = self.translation + else: + translation = lambda x: x + + if isinstance (formats, str): + formats = [formats] + p = self.uptodate_percentage + if p == None: + status = 'N/A' + elif p == 100: + status = 'up to date' + else: + status = 'outdated' + l = {} + for f in formats: + if f == 'color' and p != None: + l['color'] = percentage_color (p) + else: + l[f] = translation (format_table[status][f]) % locals () + return l + + def gdp_status (self): + if self.post_gdp: + return self.translation (format_table['post-GDP']) + else: + return self.translation (format_table['pre-GDP']) + + def short_html_status (self): + s = ' ' + if self.partially_translated: + s += '
    \n '.join (self.translators) + '
    \n' + if self.checkers: + s += ' ' + \ + '
    \n '.join (self.checkers) + '

    \n' + + c = self.completeness (['color', 'long']) + s += ' \ +%(long)s
    \n' % c + + if self.partially_translated: + u = self.uptodateness (['vague', 'color']) + s += ' \ +%(vague)s
    \n' % u + + s += ' \n' + return s + + def text_status (self): + s = self.completeness ('abbr')['abbr'] + ' ' + + if self.partially_translated: + s += self.uptodateness ('abbr')['abbr'] + ' ' + return s + + def html_status (self, numbering=SectionNumber ()): + if self.title == 'Untitled': + return '' + + if self.level[1] == 0: # if self is a master document + s = ''' + + ''' % self.print_title (numbering) + s += ''.join ([' \n' % self.translation (h) + for h in detailed_status_heads]) + s += ' \n' + s += ' \n \n' \ + % (self.translation (section_titles_string), + sum (self.masterdocument.word_count)) + + else: + s = ' \n \n' \ + % (self.print_title (numbering), + sum (self.masterdocument.word_count)) + + if self.partially_translated: + s += ' \n' + s += ' \n' + else: + s += ' \n' * 2 + + c = self.completeness (['color', 'short'], translated=True) + s += ' \n' % {'color': c['color'], + 'short': c['short']} + + if self.partially_translated: + u = self.uptodateness (['short', 'color'], translated=True) + s += ' \n' % {'color': u['color'], + 'short': u['short']} + else: + s += ' \n' + + s += ' \n \n' + s += ''.join ([i.translations[self.language].html_status (numbering) + for i in self.masterdocument.includes + if self.language in i.translations]) + + if self.level[1] == 0: # if self is a master document + s += '
    %s%s
    %s
    (%d)
    %s
    (%d)
    ' + '
    \n '.join (self.translators) + '
    ' + '
    \n '.join (self.checkers) + '
    \ +%(short)s\ +%(short)s' + self.gdp_status () + '
    \n

    \n' + return s + +class MasterTelyDocument (TelyDocument): + def __init__ (self, + filename, + parent_translations=dict ([(lang, None) + for lang in langdefs.LANGDICT])): + TelyDocument.__init__ (self, filename) + self.size = len (self.contents) + self.word_count = tely_word_count (self.contents) + translations = dict ([(lang, os.path.join (lang, filename)) + for lang in langdefs.LANGDICT]) + self.translations = \ + dict ([(lang, + TranslatedTelyDocument (translations[lang], + self, parent_translations.get (lang))) + for lang in langdefs.LANGDICT + if os.path.exists (translations[lang])]) + if self.translations: + self.includes = [MasterTelyDocument (f, self.translations) + for f in self.included_files] + else: + self.includes = [] + + def update_word_counts (self, s): + s = update_word_count (s, self.filename, sum (self.word_count)) + for i in self.includes: + s = i.update_word_counts (s) + return s + + def html_status (self, numbering=SectionNumber ()): + if self.title == 'Untitled' or not self.translations: + return '' + if self.level[1] == 0: # if self is a master document + s = ''' + + ''' % self.print_title (numbering) + s += ''.join ([' \n' % l for l in self.translations]) + s += ' \n' + s += ' \n \n' \ + % sum (self.word_count) + + else: # if self is an included file + s = ' \n \n' \ + % (self.print_title (numbering), sum (self.word_count)) + + s += ''.join ([t.short_html_status () + for t in self.translations.values ()]) + s += ' \n' + s += ''.join ([i.html_status (numbering) for i in self.includes]) + + if self.level[1] == 0: # if self is a master document + s += '
    %s%s
    Section titles
    (%d)
    %s
    (%d)
    \n

    \n' + return s + + def text_status (self, numbering=SectionNumber (), colspec=[48,12]): + if self.title == 'Untitled' or not self.translations: + return '' + + s = '' + if self.level[1] == 0: # if self is a master document + s += (self.print_title (numbering) + ' ').ljust (colspec[0]) + s += ''.join (['%s'.ljust (colspec[1]) % l + for l in self.translations]) + s += '\n' + s += ('Section titles (%d)' % \ + sum (self.word_count)).ljust (colspec[0]) + + else: + s = '%s (%d) ' \ + % (self.print_title (numbering), sum (self.word_count)) + s = s.ljust (colspec[0]) + + s += ''.join ([t.text_status ().ljust(colspec[1]) + for t in self.translations.values ()]) + s += '\n\n' + s += ''.join ([i.text_status (numbering) for i in self.includes]) + + if self.level[1] == 0: + s += '\n' + return s + + +update_category_word_counts_re = re.compile (r'(?ms)^-(\d+)-(.*?\n)\d+ *total') + +counts_re = re.compile (r'(?m)^(\d+) ') + +def update_category_word_counts_sub (m): + return '-' + m.group (1) + '-' + m.group (2) + \ + str (sum ([int (c) + for c in counts_re.findall (m.group (2))])).ljust (6) + \ + 'total' + + +progress ("Reading documents...") + +tely_files = \ + buildlib.read_pipe ("find -maxdepth 2 -name '*.tely'")[0].splitlines () +tely_files.sort () +master_docs = [MasterTelyDocument (os.path.normpath (filename)) + for filename in tely_files] +master_docs = [doc for doc in master_docs if doc.translations] + +main_status_page = open ('translations.template.html.in').read () + +enabled_languages = [l for l in langdefs.LANGDICT + if langdefs.LANGDICT[l].enabled + and l != 'en'] +lang_status_pages = \ + dict ([(l, open (os.path.join (l, 'translations.template.html.in')). 
read ()) + for l in enabled_languages]) + +progress ("Generating status pages...") + +date_time = buildlib.read_pipe ('LANG= date -u')[0] + +main_status_html = last_updated_string % date_time +main_status_html += '\n'.join ([doc.html_status () for doc in master_docs]) + +html_re = re.compile ('', re.I) +end_body_re = re.compile ('', re.I) + +html_header = ''' +''' + +main_status_page = html_re.sub (html_header, main_status_page) + +main_status_page = end_body_re.sub (main_status_html + '\n', + main_status_page) + +open ('translations.html.in', 'w').write (main_status_page) + +for l in enabled_languages: + date_time = buildlib.read_pipe ('LANG=%s date -u' % l)[0] + lang_status_pages[l] = translation[l] (last_updated_string) % date_time + lang_status_pages[l] + lang_status_page = html_re.sub (html_header, lang_status_pages[l]) + html_status = '\n'.join ([doc.translations[l].html_status () + for doc in master_docs + if l in doc.translations]) + lang_status_page = end_body_re.sub (html_status + '\n', + lang_status_page) + open (os.path.join (l, 'translations.html.in'), 'w').write (lang_status_page) + +main_status_txt = '''Documentation translations status +Generated %s +NT = not translated +FT = fully translated + +''' % date_time + +main_status_txt += '\n'.join ([doc.text_status () for doc in master_docs]) + +status_txt_file = 'out/translations-status.txt' +progress ("Writing %s..." % status_txt_file) +open (status_txt_file, 'w').write (main_status_txt) + +translation_instructions_file = 'TRANSLATION' +progress ("Updating %s..." 
% translation_instructions_file) +translation_instructions = open (translation_instructions_file).read () + +for doc in master_docs: + translation_instructions = doc.update_word_counts (translation_instructions) + +for html_file in re.findall (r'(?m)^\d+ *(\S+?\.html\S*?)(?: |$)', + translation_instructions): + word_count = sgml_word_count (open (html_file).read ()) + translation_instructions = update_word_count (translation_instructions, + html_file, + word_count) + +for po_file in re.findall (r'(?m)^\d+ *(\S+?\.po\S*?)(?: |$)', + translation_instructions): + word_count = po_word_count (open (po_file).read ()) + translation_instructions = update_word_count (translation_instructions, + po_file, + word_count) + +translation_instructions = \ + update_category_word_counts_re.sub (update_category_word_counts_sub, + translation_instructions) + +open (translation_instructions_file, 'w').write (translation_instructions) diff --git a/scripts/aux/update-snippets.py b/scripts/aux/update-snippets.py new file mode 100755 index 0000000000..6ccdbc1ecd --- /dev/null +++ b/scripts/aux/update-snippets.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +# update-snippets.py + +# USAGE: update-snippets.py REFERENCE-DIR TARGET-DIR FILES +# +# update ly snippets in TARGET-DIR/FILES with snippets from REFERENCE-DIR/FILES +# +# More precisely, each existing FILE in TARGET-DIR is matched to the FILE in +# REFERENCE-DIR (it the latter does not exist, a warning is given). +# +# Shell wildcards expansion is performed on FILES. +# This script currently supports Texinfo format. +# Ly snippets preceded with a line containing '@c KEEP LY' in TARGET-DIR/FILES +# will not be updated. +# An error occurs if REFERENCE-DIR/FILE and TARGET-DIR/FILE do not have the +# same snippets count. + +import sys +import os +import glob +import re + +print "update-snippets.py" + +comment_re = re.compile (r'(? 
0 and (not target_source[j-1].startswith ('@c KEEP LY')) and target_source[j] != ref_source[k]: + target_source[j] = ref_source[k] + c += 1 + changed_snippets_count += 1 + f = open (file, 'w') + f.write (''.join (target_source)) + sys.stderr.write ('%s: %d/%d snippets updated\n' % (file, c, snippet_count)) + +sys.stderr.write ('\nTotal: %d snippets, %d updated snippets.\n' % (total_snippet_count, changed_snippets_count)) +sys.exit (exit_code) diff --git a/scripts/build/GNUmakefile b/scripts/build/GNUmakefile new file mode 100644 index 0000000000..5a62a47a98 --- /dev/null +++ b/scripts/build/GNUmakefile @@ -0,0 +1,14 @@ +depth = ../.. + +STEPMAKE_TEMPLATES=script install po + +include $(depth)/make/stepmake.make + +# Should we install these? This should be handled by sysadmin or +# packager but if she forgets... +#INSTALLATION_OUT_SUFFIXES=1 +#INSTALLATION_OUT_DIR1=$(local_lilypond_datadir)/scripts +#INSTALLATION_OUT_FILES1=$(outdir)/lilypond-login $(outdir)/lilypond-profile + +all: $(INSTALLATION_FILES) + diff --git a/scripts/build/bib2html.py b/scripts/build/bib2html.py new file mode 100644 index 0000000000..c16f21cce2 --- /dev/null +++ b/scripts/build/bib2html.py @@ -0,0 +1,76 @@ +#!@PYTHON@ +import os +import sys +import getopt +import tempfile + +# usage: +def usage (): + print 'usage: %s [-s style] [-o ] BIBFILES...' 
+ +(options, files) = getopt.getopt (sys.argv[1:], 's:o:', []) + +output = 'bib.html' +style = 'long' + +for (o,a) in options: + if o == '-h' or o == '--help': + usage () + sys.exit (0) + elif o == '-s' or o == '--style': + style = a + elif o == '-o' or o == '--output': + output = a + else: + raise Exception ('unknown option: %s' % o) + + +if style not in ['alpha','index','long','longp','long-pario','short','short-pario','split']: + sys.stderr.write ("Unknown style \`%s'\n" % style) + +tempfile = tempfile.mktemp ('bib2html') + +if not files: + usage () + sys.exit (2) + + +def strip_extension (f, ext): + (p, e) = os.path.splitext (f) + if e == ext: + e = '' + return p + e + +nf = [] +for f in files: + nf.append (strip_extension (f, '.bib')) + +files = ','.join (nf) + +open (tempfile + '.aux', 'w').write (r''' +\relax +\citation{*} +\bibstyle{html-%(style)s} +\bibdata{%(files)s}''' % vars ()) + +cmd = "bibtex %s" % tempfile + +sys.stdout.write ("Invoking `%s'\n" % cmd) +stat = os.system (cmd) +if stat <> 0: + sys.exit(1) + + +#TODO: do tex -> html on output + +bbl = open (tempfile + '.bbl').read () + +open (output, 'w').write (bbl) + + +def cleanup (tempfile): + for a in ['aux','bbl', 'blg']: + os.unlink (tempfile + '.' 
+ a) + +cleanup (tempfile) + diff --git a/scripts/build/catmidi.py b/scripts/build/catmidi.py new file mode 100644 index 0000000000..c90d602627 --- /dev/null +++ b/scripts/build/catmidi.py @@ -0,0 +1,12 @@ +#!@PYTHON@ + +import sys +import midi + +(h,tracks) = midi.parse (open (sys.argv[1]).read ()) + +tracks = tracks[1:] + +for t in tracks: + for e in t: + print e diff --git a/scripts/build/extract_texi_filenames.py b/scripts/build/extract_texi_filenames.py new file mode 100644 index 0000000000..5798d5dab2 --- /dev/null +++ b/scripts/build/extract_texi_filenames.py @@ -0,0 +1,170 @@ +#!@PYTHON@ +# -*- coding: utf-8 -*- +# extract_texi_filenames.py + +# USAGE: extract_texi_filenames.py [-o OUTDIR] FILES +# +# -o OUTDIR specifies that output files should rather be written in OUTDIR +# +# Description: +# This script parses the .texi file given and creates a file with the +# nodename <=> filename/anchor map. +# The idea behind: Unnumbered subsections go into the same file as the +# previous numbered section, @translationof gives the original node name, +# which is then used for the filename/anchor. +# +# If this script is run on a file texifile.texi, it produces a file +# texifile[.LANG].xref-map with tab-separated entries of the form +# NODE\tFILENAME\tANCHOR +# LANG is the document language in case it's not 'en' +# Note: The filename does not have any extension appended! +# This file can then be used by our texi2html init script to determine +# the correct file name and anchor for external refs + +import sys +import re +import os +import getopt + +optlist, args = getopt.getopt (sys.argv[1:],'o:') +files = args + +outdir = '.' 
+for x in optlist: + if x[0] == '-o': + outdir = x[1] + +if not os.path.isdir (outdir): + if os.path.exists (outdir): + os.unlink (outdir) + os.makedirs (outdir) + +include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M) +whitespaces = re.compile (r'\s+') +section_translation_re = re.compile ('^@(node|(?:unnumbered|appendix)\ +(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|\ +(?:major|chap|(?:sub){0,2})heading|translationof) (.*?)\\s*$', re.MULTILINE) + +def expand_includes (m, filename): + filepath = os.path.join (os.path.dirname (filename), m.group(1)) + '.texi' + if os.path.exists (filepath): + return extract_sections (filepath)[1] + else: + print "Unable to locate include file " + filepath + return '' + +lang_re = re.compile (r'^@documentlanguage (.+)', re.M) + +def extract_sections (filename): + result = '' + f = open (filename, 'r') + page = f.read () + f.close() + # Search document language + m = lang_re.search (page) + if m and m.group (1) != 'en': + lang_suffix = '.' 
+ m.group (1) + else: + lang_suffix = '' + # Replace all includes by their list of sections and extract all sections + page = include_re.sub (lambda m: expand_includes (m, filename), page) + sections = section_translation_re.findall (page) + for sec in sections: + result += "@" + sec[0] + " " + sec[1] + "\n" + return (lang_suffix, result) + +# Convert a given node name to its proper file name (normalization as explained +# in the texinfo manual: +# http://www.gnu.org/software/texinfo/manual/texinfo/html_node/HTML-Xref-Node-Name-Expansion.html +def texinfo_file_name(title): + # exception: The top node is always mapped to index.html + if title == "Top": + return "index" + # File name normalization by texinfo (described in the texinfo manual): + # 1/2: letters and numbers are left unchanged + # 3/4: multiple, leading and trailing whitespace is removed + title = title.strip (); + title = whitespaces.sub (' ', title) + # 5: all remaining spaces are converted to '-' + # 6: all other 7- or 8-bit chars are replaced by _xxxx (xxxx=ascii character code) + result = '' + for index in range(len(title)): + char = title[index] + if char == ' ': # space -> '-' + result += '-' + elif ( ('0' <= char and char <= '9' ) or + ('A' <= char and char <= 'Z' ) or + ('a' <= char and char <= 'z' ) ): # number or letter + result += char + else: + ccode = ord(char) + if ccode <= 0xFFFF: + result += "_%04x" % ccode + else: + result += "__%06x" % ccode + # 7: if name begins with number, prepend 't_g' (so it starts with a letter) + if (result != '') and (ord(result[0]) in range (ord('0'), ord('9'))): + result = 't_g' + result + return result + +texinfo_re = re.compile (r'@.*{(.*)}') +def remove_texinfo (title): + return texinfo_re.sub (r'\1', title) + +def create_texinfo_anchor (title): + return texinfo_file_name (remove_texinfo (title)) + +unnumbered_re = re.compile (r'unnumbered.*') +def process_sections (filename, lang_suffix, page): + sections = section_translation_re.findall (page) + basename 
= os.path.splitext (os.path.basename (filename))[0] + p = os.path.join (outdir, basename) + lang_suffix + '.xref-map' + f = open (p, 'w') + + this_title = '' + this_filename = 'index' + this_anchor = '' + this_unnumbered = False + had_section = False + for sec in sections: + if sec[0] == "node": + # Write out the cached values to the file and start a new section: + if this_title != '' and this_title != 'Top': + f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n") + had_section = False + this_title = remove_texinfo (sec[1]) + this_anchor = create_texinfo_anchor (sec[1]) + elif sec[0] == "translationof": + anchor = create_texinfo_anchor (sec[1]) + # If @translationof is used, it gives the original node name, which + # we use for the anchor and the file name (if it is a numbered node) + this_anchor = anchor + if not this_unnumbered: + this_filename = anchor + else: + # Some pages might not use a node for every section, so treat this + # case here, too: If we already had a section and encounter enother + # one before the next @node, we write out the old one and start + # with the new values + if had_section and this_title != '': + f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n") + this_title = remove_texinfo (sec[1]) + this_anchor = create_texinfo_anchor (sec[1]) + had_section = True + + # unnumbered nodes use the previously used file name, only numbered + # nodes get their own filename! However, top-level @unnumbered + # still get their own file. 
+ this_unnumbered = unnumbered_re.match (sec[0]) + if not this_unnumbered or sec[0] == "unnumbered": + this_filename = this_anchor + + if this_title != '' and this_title != 'Top': + f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n") + f.close () + + +for filename in files: + print "extract_texi_filenames.py: Processing %s" % filename + (lang_suffix, sections) = extract_sections (filename) + process_sections (filename, lang_suffix, sections) diff --git a/scripts/build/gen-emmentaler-scripts.py b/scripts/build/gen-emmentaler-scripts.py new file mode 100644 index 0000000000..3da8840869 --- /dev/null +++ b/scripts/build/gen-emmentaler-scripts.py @@ -0,0 +1,104 @@ +#!@PYTHON@ +import sys +import getopt +import re +import os + +(options, files) = \ + getopt.getopt (sys.argv[1:], + '', + ['dir=']) + + +outdir = '' +for opt in options: + o = opt[0] + a = opt[1] + if o == '--dir': + outdir = a + else: + print o + raise getopt.error + +# Ugh +for design_size in [11,13,14,16,18,20,23,26]: + name = 'Emmentaler' + filename = name.lower () + script = '''#!@FONTFORGE@ + +New(); + +# Separate Feta versioning? +# * using 20 as Weight works for gnome-font-select widget: gfs + +notice = ""; +notice += "This font is distributed under the GNU General Public License. "; +notice += "As a special exception, if you create a document which uses "; +notice += "this font, and embed this font or unaltered portions of this "; +notice += "font into the document, this font does not by itself cause the "; +notice += "resulting document to be covered by the GNU General Public License.";; + +SetFontNames("%(name)s-%(design_size)d", "%(name)s", "%(name)s %(design_size)d", "%(design_size)d", notice, "@TOPLEVEL_VERSION@"); + +MergeFonts("feta%(design_size)d.pfb"); +MergeFonts("parmesan%(design_size)d.pfb"); + +# load nummer/din after setting PUA. 
+i = 0; +while (i < CharCnt()) + Select(i); +# crashes fontforge, use PUA for now -- jcn +# SetUnicodeValue(i + 0xF0000, 0); +/* +PRIVATE AREA + In the BMP, the range 0xe000 to 0xf8ff will never be assigned to any + characters by the standard and is reserved for private usage. For the + Linux community, this private area has been subdivided further into the + range 0xe000 to 0xefff which can be used individually by any end-user + and the Linux zone in the range 0xf000 to 0xf8ff where extensions are + coordinated among all Linux users. The registry of the characters + assigned to the Linux zone is currently maintained by H. Peter Anvin + . +*/ + SetUnicodeValue(i + 0xE000, 0); + ++i; +endloop + + +MergeFonts("feta-alphabet%(design_size)d.pfb"); +MergeKern("feta-alphabet%(design_size)d.tfm"); + +LoadTableFromFile("LILF", "%(filename)s-%(design_size)d.subfonts"); +LoadTableFromFile("LILC", "feta%(design_size)d.otf-table"); +LoadTableFromFile("LILY", "feta%(design_size)d.otf-gtable"); + +Generate("%(filename)s-%(design_size)d.otf"); +Generate("%(filename)s-%(design_size)d.svg"); +''' % vars() + + basename = '%s-%d' % (filename, design_size) + path = os.path.join (outdir, basename + '.pe') + open (path, 'w').write (script) + + subfonts = ['feta%(design_size)d', + 'parmesan%(design_size)d', + 'feta-alphabet%(design_size)d'] + + ns = [] + for s in subfonts: + ns.append ('%s' % (s % vars())) + + subfonts_str = ' '.join (ns) + + open (os.path.join (outdir, '%(filename)s-%(design_size)d.subfonts' % vars()), 'w').write (subfonts_str) + + path = os.path.join (outdir, '%s-%d.dep' % (filename, design_size)) + + deps = r'''%(filename)s-%(design_size)d.otf: $(outdir)/feta%(design_size)d.pfa \ + $(outdir)/parmesan%(design_size)d.pfa \ + $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-table \ + $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-gtable +''' % vars() + open (path, 'w').write (deps) + + open (os.path.join (outdir, basename + 
'.fontname'), 'w').write ("%s-%d" % (name, design_size)) diff --git a/scripts/build/genicon.py b/scripts/build/genicon.py new file mode 100644 index 0000000000..543735240f --- /dev/null +++ b/scripts/build/genicon.py @@ -0,0 +1,31 @@ +#!@PYTHON@ +import os +import sys +import tempfile + +base = os.path.splitext (os.path.split (sys.argv[1])[1])[0] +input = os.path.abspath (sys.argv[1]) +output = os.path.abspath (sys.argv[2]) +program_name= os.path.split (sys.argv[0])[1] + +dir = tempfile.mktemp (program_name) +os.mkdir (dir, 0777) +os.chdir(dir) + +def system (c): + print c + if os.system (c): + raise 'barf' + +outputs = [] +for sz in [48,32,16] : + + for depth in [24,8]: + out = '%(base)s-%(sz)d-%(depth)d.png' % locals() + system ('convert -depth %(depth)d -sample %(sz)d %(input)s %(out)s' % + locals ()) + outputs.append (out) + +system('icotool --output %s --create %s' % (output, ' '.join (outputs))) +system('rm -rf %(dir)s' % locals()) + diff --git a/scripts/build/grand-replace.sh b/scripts/build/grand-replace.sh new file mode 100644 index 0000000000..86d2608b80 --- /dev/null +++ b/scripts/build/grand-replace.sh @@ -0,0 +1,5 @@ +#!@BASH@ +# note: dash does not work + +pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*2007' '\1 2007--2008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change') +pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*([^-]*--)(200[0-7])' '\1 \2\062008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change') diff --git a/scripts/build/help2man.pl b/scripts/build/help2man.pl new file mode 100644 index 0000000000..9cb09c4859 --- /dev/null +++ b/scripts/build/help2man.pl @@ -0,0 +1,559 @@ +#!@PERL@ -w + +# Generate a short man page from --help and --version output. +# Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software +# Foundation, Inc. 
+ +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +# Written by Brendan O'Dea +# Available from ftp://ftp.gnu.org/gnu/help2man/ + +use 5.005; +use strict; +use Getopt::Long; +use Text::Tabs qw(expand); +use POSIX qw(strftime setlocale LC_TIME); + +my $this_program = 'help2man'; +my $this_version = '1.28'; +my $version_info = < +EOT + +my $help_info = <. +EOT + +my $section = 1; +my $manual = ''; +my $source = ''; +my $help_option = '--help'; +my $version_option = '--version'; +my ($opt_name, @opt_include, $opt_output, $opt_info, $opt_no_info); + +my %opt_def = ( + 'n|name=s' => \$opt_name, + 's|section=s' => \$section, + 'm|manual=s' => \$manual, + 'S|source=s' => \$source, + 'i|include=s' => sub { push @opt_include, [ pop, 1 ] }, + 'I|opt-include=s' => sub { push @opt_include, [ pop, 0 ] }, + 'o|output=s' => \$opt_output, + 'p|info-page=s' => \$opt_info, + 'N|no-info' => \$opt_no_info, + 'h|help-option=s' => \$help_option, + 'v|version-option=s' => \$version_option, +); + +# Parse options. +Getopt::Long::config('bundling'); +GetOptions (%opt_def, + help => sub { print $help_info; exit }, + version => sub { print $version_info; exit }, +) or die $help_info; + +die $help_info unless @ARGV == 1; + +my %include = (); +my %append = (); +my @include = (); # retain order given in include file + +# Process include file (if given). 
Format is: +# +# [section name] +# verbatim text +# +# or +# +# /pattern/ +# verbatim text +# + +while (@opt_include) +{ + my ($inc, $required) = @{shift @opt_include}; + + next unless -f $inc or $required; + die "$this_program: can't open `$inc' ($!)\n" + unless open INC, $inc; + + my $key; + my $hash = \%include; + + while () + { + # [section] + if (/^\[([^]]+)\]/) + { + $key = uc $1; + $key =~ s/^\s+//; + $key =~ s/\s+$//; + $hash = \%include; + push @include, $key unless $include{$key}; + next; + } + + # /pattern/ + if (m!^/(.*)/([ims]*)!) + { + my $pat = $2 ? "(?$2)$1" : $1; + + # Check pattern. + eval { $key = qr($pat) }; + if ($@) + { + $@ =~ s/ at .*? line \d.*//; + die "$inc:$.:$@"; + } + + $hash = \%append; + next; + } + + # Check for options before the first section--anything else is + # silently ignored, allowing the first for comments and + # revision info. + unless ($key) + { + # handle options + if (/^-/) + { + local @ARGV = split; + GetOptions %opt_def; + } + + next; + } + + $hash->{$key} ||= ''; + $hash->{$key} .= $_; + } + + close INC; + + die "$this_program: no valid information found in `$inc'\n" + unless $key; +} + +# Compress trailing blank lines. +for my $hash (\(%include, %append)) +{ + for (keys %$hash) { $hash->{$_} =~ s/\n+$/\n/ } +} + +# Turn off localisation of executable's output. +@ENV{qw(LANGUAGE LANG LC_ALL)} = ('C') x 3; + +# Turn off localisation of date (for strftime). +setlocale LC_TIME, 'C'; + +# Grab help and version info from executable. 
+my ($help_text, $version_text) = map { + join '', map { s/ +$//; expand $_ } `$ARGV[0] $_ 2>/dev/null` + or die "$this_program: can't get `$_' info from $ARGV[0]\n" +} $help_option, $version_option; + +my $date = strftime "%B %Y", localtime; +(my $program = $ARGV[0]) =~ s!.*/!!; +my $package = $program; +my $version; + +if ($opt_output) +{ + unlink $opt_output + or die "$this_program: can't unlink $opt_output ($!)\n" + if -e $opt_output; + + open STDOUT, ">$opt_output" + or die "$this_program: can't create $opt_output ($!)\n"; +} + +# The first line of the --version information is assumed to be in one +# of the following formats: +# +# +# +# {GNU,Free} +# ({GNU,Free} ) +# - {GNU,Free} +# +# and seperated from any copyright/author details by a blank line. + +($_, $version_text) = split /\n+/, $version_text, 2; + +if (/^(\S+) +\(((?:GNU|Free) +[^)]+)\) +(.*)/ or + /^(\S+) +- *((?:GNU|Free) +\S+) +(.*)/) +{ + $program = $1; + $package = $2; + $version = $3; +} +elsif (/^((?:GNU|Free) +)?(\S+) +(.*)/) +{ + $program = $2; + $package = $1 ? "$1$2" : $2; + $version = $3; +} +else +{ + $version = $_; +} + +$program =~ s!.*/!!; + +# No info for `info' itself. +$opt_no_info = 1 if $program eq 'info'; + +# --name overrides --include contents. +$include{NAME} = "$program \\- $opt_name\n" if $opt_name; + +# Default (useless) NAME paragraph. +$include{NAME} ||= "$program \\- manual page for $program $version\n"; + +# Man pages traditionally have the page title in caps. +my $PROGRAM = uc $program; + +# Set default page head/footers +$source ||= "$program $version"; +unless ($manual) +{ + for ($section) + { + if (/^(1[Mm]|8)/) { $manual = 'System Administration Utilities' } + elsif (/^6/) { $manual = 'Games' } + else { $manual = 'User Commands' } + } +} + +# Extract usage clause(s) [if any] for SYNOPSIS. +if ($help_text =~ s/^Usage:( +(\S+))(.*)((?:\n(?: {6}\1| *or: +\S).*)*)//m) +{ + my @syn = $2 . 
$3; + + if ($_ = $4) + { + s/^\n//; + for (split /\n/) { s/^ *(or: +)?//; push @syn, $_ } + } + + my $synopsis = ''; + for (@syn) + { + $synopsis .= ".br\n" if $synopsis; + s!^\S*/!!; + s/^(\S+) *//; + $synopsis .= ".B $1\n"; + s/\s+$//; + s/(([][]|\.\.+)+)/\\fR$1\\fI/g; + s/^/\\fI/ unless s/^\\fR//; + $_ .= '\fR'; + s/(\\fI)( *)/$2$1/g; + s/\\fI\\fR//g; + s/^\\fR//; + s/\\fI$//; + s/^\./\\&./; + + $synopsis .= "$_\n"; + } + + $include{SYNOPSIS} ||= $synopsis; +} + +# Process text, initial section is DESCRIPTION. +my $sect = 'DESCRIPTION'; +$_ = "$help_text\n\n$version_text"; + +# Normalise paragraph breaks. +s/^\n+//; +s/\n*$/\n/; +s/\n\n+/\n\n/g; + +# Temporarily exchange leading dots, apostrophes and backslashes for +# tokens. +s/^\./\x80/mg; +s/^'/\x81/mg; +s/\\/\x82/g; + +# Start a new paragraph (if required) for these. +s/([^\n])\n(Report +bugs|Email +bug +reports +to|Written +by)/$1\n\n$2/g; + +sub convert_option; + +while (length) +{ + # Convert some standard paragraph names. + if (s/^(Options|Examples): *\n//) + { + $sect = uc $1; + next; + } + + # Copyright section + if (/^Copyright +[(\xa9]/) + { + $sect = 'COPYRIGHT'; + $include{$sect} ||= ''; + $include{$sect} .= ".PP\n" if $include{$sect}; + + my $copy; + ($copy, $_) = split /\n\n/, $_, 2; + + for ($copy) + { + # Add back newline + s/\n*$/\n/; + + # Convert iso9959-1 copyright symbol or (c) to nroff + # character. + s/^Copyright +(?:\xa9|\([Cc]\))/Copyright \\(co/mg; + + # Insert line breaks before additional copyright messages + # and the disclaimer. + s/(.)\n(Copyright |This +is +free +software)/$1\n.br\n$2/g; + + # Join hyphenated lines. + s/([A-Za-z])-\n */$1/g; + } + + $include{$sect} .= $copy; + $_ ||= ''; + next; + } + + # Catch bug report text. + if (/^(Report +bugs|Email +bug +reports +to) /) + { + $sect = 'REPORTING BUGS'; + } + + # Author section. 
+ elsif (/^Written +by/) + { + $sect = 'AUTHOR'; + } + + # Examples, indicated by an indented leading $, % or > are + # rendered in a constant width font. + if (/^( +)([\$\%>] )\S/) + { + my $indent = $1; + my $prefix = $2; + my $break = '.IP'; + $include{$sect} ||= ''; + while (s/^$indent\Q$prefix\E(\S.*)\n*//) + { + $include{$sect} .= "$break\n\\f(CW$prefix$1\\fR\n"; + $break = '.br'; + } + + next; + } + + my $matched = ''; + $include{$sect} ||= ''; + + # Sub-sections have a trailing colon and the second line indented. + if (s/^(\S.*:) *\n / /) + { + $matched .= $& if %append; + $include{$sect} .= qq(.SS "$1"\n); + } + + my $indent = 0; + my $content = ''; + + # Option with description. + if (s/^( {1,10}([+-]\S.*?))(?:( +(?!-))|\n( {20,}))(\S.*)\n//) + { + $matched .= $& if %append; + $indent = length ($4 || "$1$3"); + $content = ".TP\n\x83$2\n\x83$5\n"; + unless ($4) + { + # Indent may be different on second line. + $indent = length $& if /^ {20,}/; + } + } + + # Option without description. + elsif (s/^ {1,10}([+-]\S.*)\n//) + { + $matched .= $& if %append; + $content = ".HP\n\x83$1\n"; + $indent = 80; # not continued + } + + # Indented paragraph with tag. + elsif (s/^( +(\S.*?) +)(\S.*)\n//) + { + $matched .= $& if %append; + $indent = length $1; + $content = ".TP\n\x83$2\n\x83$3\n"; + } + + # Indented paragraph. + elsif (s/^( +)(\S.*)\n//) + { + $matched .= $& if %append; + $indent = length $1; + $content = ".IP\n\x83$2\n"; + } + + # Left justified paragraph. + else + { + s/(.*)\n//; + $matched .= $& if %append; + $content = ".PP\n" if $include{$sect}; + $content .= "$1\n"; + } + + # Append continuations. + while (s/^ {$indent}(\S.*)\n//) + { + $matched .= $& if %append; + $content .= "\x83$1\n" + } + + # Move to next paragraph. + s/^\n+//; + + for ($content) + { + # Leading dot and apostrophe protection. + s/\x83\./\x80/g; + s/\x83'/\x81/g; + s/\x83//g; + + # Convert options. + s/(^| )(-[][\w=-]+)/$1 . 
convert_option $2/mge; + } + + # Check if matched paragraph contains /pat/. + if (%append) + { + for my $pat (keys %append) + { + if ($matched =~ $pat) + { + $content .= ".PP\n" unless $append{$pat} =~ /^\./; + $content .= $append{$pat}; + } + } + } + + $include{$sect} .= $content; +} + +# Refer to the real documentation. +unless ($opt_no_info) +{ + my $info_page = $opt_info || $program; + + $sect = 'SEE ALSO'; + $include{$sect} ||= ''; + $include{$sect} .= ".PP\n" if $include{$sect}; + $include{$sect} .= <|)(.*?)(?:|)'), + r'@command{\1}'), + 'texi2html': + (re.compile (r'@command{(.*?)}'), + r'\1'), + }, + 'code': { + 'html2texi': + (re.compile (r'(.*?)'), + r'@code{\1}'), + 'texi2html': + (re.compile (r'@code{(.*?)}'), + r'\1'), + }, + } + +whitespaces = re.compile (r'\s+') + + +def _ (s): + if not s: + return '' + str = whitespaces.sub (' ', s) + for c in html_codes: + str = str.replace (c[1], c[0]) + for command in texi_html_conversion: + d = texi_html_conversion[command] + str = d['html2texi'][0].sub (d['html2texi'][1], str) + str = my_gettext (str) + str = d['texi2html'][0].sub (d['texi2html'][1], str) + for c in html_codes: + str = str.replace (c[0], c[1]) + return str + +link_re = re.compile (r'') + +def link_gettext (m): + return '' + +makeinfo_title_re = re.compile (r'([^<]*?) 
- ([^<]*?)') + +def makeinfo_title_gettext (m): + return '' + _ (m.group (1)) + ' - ' + m.group (2) + '' + +texi2html_title_re = re.compile (r'(.+): ([A-Z\d.]+ |)(.+?)') + +def texi2html_title_gettext (m): + return '' + _ (m.group (1)) + double_punct_char_separator + ': ' \ + + m.group (2) + _ (m.group (3)) + '' + +a_href_re = re.compile ('(?s)[^>]*?href="[\\w.#-_]+"[^>]*?>)(?P)?\ +(?PAppendix )?(?P[A-Z0-9.]+ | (?:<){1,2} | [^>:]+?: | |)\ +(?P(?:|||[^>])+?)(?P(?(code)|))\ +(?P (?:>){1,2} | |):?') + +def a_href_gettext (m): + s = '' + if m.group(0)[-1] == ':': + s = double_punct_char_separator + ':' + t = '' + if m.group ('appendix'): + t = _ (m.group ('appendix')) + return '' + s + +h_re = re.compile (r'\s*(Appendix |)([A-Z\d.]+ |)(.+?)\s*') + +def h_gettext (m): + if m.group (3): + s = _ (m.group (3)) + else: + s= '' + return '' + s +\ + m.group (4) + _ (m.group (5)) + '' + +for filename in files: + f = open (filename, 'r') + page = f.read () + f.close () + page = link_re.sub (link_gettext, page) + page = makeinfo_title_re.sub (makeinfo_title_gettext, page) + page = texi2html_title_re.sub (texi2html_title_gettext, page) + page = a_href_re.sub (a_href_gettext, page) + page = h_re.sub (h_gettext, page) + for w in ('Next:', 'Previous:', 'Up:'): + page = page.replace (w, _ (w)) + page = langdefs.LANGDICT[lang].html_filter (page) + f = open (os.path.join (outdir, filename), 'w') + f.write (page) + f.close () diff --git a/scripts/build/install-info-html.sh b/scripts/build/install-info-html.sh new file mode 100644 index 0000000000..a116cd93d0 --- /dev/null +++ b/scripts/build/install-info-html.sh @@ -0,0 +1,157 @@ +#!@BASH@ + +name=install-info-html +version=1.0 + +all= +index_dir=. + +# +# debugging +# +debug_echo=: + + +# +# print usage +# +help () +{ + cat << EOF +$name $version +Install HTML info document. + +Usage: $name [OPTIONS]... [DOCUMENT-DIR]... 
+ +Options: + -a, --all assume all subdirectories of index to be DOCUMENT-DIRs + -d, --dir=DIR set index directory to DIR (default=.) + -D, --debug print debugging info + -h, --help show this help text + -v, --version show version +EOF +} + + +cleanup () +{ + $debug_echo "cleaning ($?)..." +} + +trap cleanup 0 9 15 + +# +# Find command line options and switches +# + +# "x:" x takes argument +# +options="adhvW:" +# +# ugh, "\-" is a hack to support long options +# must be in double quotes for bash-2.0 + +while getopts "\-:$options" O +do + $debug_echo "O: \`$O'" + $debug_echo "arg: \`$OPTARG'" + case $O in + a) + all=yes + ;; + D) + [ "$debug_echo" = "echo" ] && set -x + debug_echo=echo + ;; + h) + help; + exit 0 + ;; + v) + echo $name $version + exit 0 + ;; + d) + index_dir=$OPTARG + ;; + # a long option! + -) + case "$OPTARG" in + a*|-a*) + all=yes + ;; + de*|-de*) + [ "$debug_echo" = "echo" ] && set -x + debug_echo=echo + ;; + h*|-h*) + help; + exit 0 + ;; + di*|-di*) + index_dir="`expr \"$OPTARG\" ':' '[^=]*=\(.*\)'`" + ;; + version|-version) + echo $name $version + exit 0 + ;; + *|-*) + echo "$0: invalid option -- \"$OPTARG\"" + help; + exit -1 + ;; + esac + esac +done +shift `expr $OPTIND - 1` + +# +# Input file name +# +if [ -z "$all" -a -z "$1" ]; then + help + echo "$name: No HTML documents given" + exit 2 +fi + +if [ -n "$all" -a -n "$1" ]; then + echo "$name: --all specified, ignoring DIRECTORY-DIRs" +fi + +if [ -n "$all" ]; then + document_dirs=`/bin/ls -d1 $index_dir` +else + document_dirs=$* +fi + +index_file=$index_dir/index.html +rm -f $index_file +echo -n "$name: Writing index: $index_file..." + +# head +cat >> $index_file < +Info documentation index + +

    Info documentation index

    +

    +This is the directory file \`index.html' a.k.a. \`DIR', which contains the +topmost node of the HTML Info hierarchy. +

    +
      +EOF + +#list +for i in $document_dirs; do + cat < $i ($i as one big page) +EOF +done >> $index_file + +# foot +cat >> $index_file < + + +EOF +echo diff --git a/scripts/build/lilypond-words.py b/scripts/build/lilypond-words.py new file mode 100644 index 0000000000..e9851f6231 --- /dev/null +++ b/scripts/build/lilypond-words.py @@ -0,0 +1,149 @@ +#!@PYTHON@ + +# Created 01 September 2003 by Heikki Junes. +# Rewritten by John Mandereau + +# Generates lilypond-words.el for (X)Emacs and lilypond-words[.vim] for Vim. + +import re +import sys +import os +import getopt + +keywords = [] +reserved_words = [] +note_names = [] + +# keywords not otherwise found +keywords += ['include', 'maininput', 'version'] + +# the main keywords +s = open ('lily/lily-lexer.cc', 'r').read () +keywords += [w for w in re.findall (r"\s*{\"(.+)\",\s*.*},\s*\n", s)] + +s = open ('scm/markup.scm', 'r').read () +keywords += [w for w in re.findall (r"(?m)^\s*\(cons\s*([a-z-]+)-markup", s)] + +# identifiers and keywords +for name in ['ly/chord-modifiers-init.ly', + 'ly/dynamic-scripts-init.ly', + 'ly/engraver-init.ly', + 'ly/grace-init.ly', + 'ly/gregorian.ly', + 'ly/music-functions-init.ly', + 'ly/performer-init.ly', + 'ly/property-init.ly', + 'ly/scale-definitions-init.ly', + 'ly/script-init.ly', + 'ly/spanners-init.ly', + 'ly/declarations-init.ly', + 'ly/params-init.ly']: + s = open (name, 'r').read () + keywords += [w for w in re.findall (r"(?m)^\s*\"?([a-zA-Z]+)\"?\s*=", s)] + +# note names +for name in ['ly/catalan.ly', + 'ly/deutsch.ly', + 'ly/drumpitch-init.ly', + 'ly/english.ly', + 'ly/espanol.ly', + 'ly/italiano.ly', + 'ly/nederlands.ly', + 'ly/norsk.ly', + 'ly/portugues.ly', + 'ly/suomi.ly', + 'ly/svenska.ly', + 'ly/vlaams.ly']: + s = open (name, 'r').read () + note_names += [n for n in re.findall (r"(?m)^\s*\(([a-z]+)[^l]+ly:make-pitch", s)] + +# reserved words +for name in ['ly/engraver-init.ly', + 'ly/performer-init.ly']: + s = open (name, 'r').read () + for pattern in 
[r"(?m)^\s*.consists\s+\"([a-zA-Z_]+)\"", + r"[\\]name\s+[\"]?([a-zA-Z_]+)[\"]?", + r"\s+([a-zA-Z_]+)\s*\\(?:set|override)"]: + reserved_words += [w for w in re.findall (pattern, s)] + +keywords = list (set (keywords)) +keywords.sort (reverse=True) + +reserved_words = list (set (reserved_words)) +reserved_words.sort (reverse=True) + +note_names = list (set (note_names)) +note_names.sort (reverse=True) + + +# output +outdir = '' +out_words = False +out_el = False +out_vim = False + +options = getopt.getopt (sys.argv[1:], + '', ['words', 'el', 'vim', 'dir='])[0] + +for (o, a) in options: + if o == '--words': + out_words = True + elif o == '--el': + out_el = True + elif o == '--vim': + out_vim = True + elif o == '--dir': + outdir = a + +if out_words or out_el: + outstring = ''.join (['\\\\' + w + '\n' for w in keywords]) + outstring += ''.join ([w + '\n' for w in reserved_words]) + outstring += ''.join ([w + '\n' for w in note_names]) + +if out_words: + f = open (os.path.join (outdir, 'lilypond-words'), 'w') + f.write (outstring) + +if out_el: + f = open (os.path.join (outdir, 'lilypond-words.el'), 'w') + f.write (outstring) + + # the menu in lilypond-mode.el + # for easier typing of this list, replace '/' with '\' below + # when writing to file + elisp_menu = ['/( - _ /) -', + '/[ - _ /] -', + '< - _ > -', + '<< - _ >> -', + '///( - _ ///) -', + '///[ - _ ///] -', + '///< - _ ///! -', + '///> - _ ///! -', + '//center - / << _ >> -', + '//column - / << _ >> -', + '//context/ Staff/ = - % { _ } -', + '//context/ Voice/ = - % { _ } -', + '//markup - { _ } -', + '//notes - { _ } -', + '//relative - % { _ } -', + '//score - { //n /? //simultaneous { //n _ //n } /! //n //paper { } //n /? //midi { } //n /! 
} //n -', + '//simultaneous - { _ } -', + '//sustainDown - _ //sustainUp -', + '//times - % { _ } -', + '//transpose - % { _ } -', + ''] + f.write ('\n'.join ([line.replace ('/', '\\') for line in elisp_menu])) + +if out_vim: + f = open (os.path.join (outdir, 'lilypond-words.vim'), 'w') + f.write ('syn match lilyKeyword \"[-_^]\\?\\\\\\(') + f.write (''.join ([w + '\\|' for w in keywords])) + f.write ('n\\)\\(\\A\\|\\n\\)\"me=e-1\n') + + f.write ('syn match lilyReservedWord \"\\(\\A\\|\\n\\)\\(') + f.write (''.join ([w + '\\|' for w in reserved_words])) + f.write ('Score\\)\\(\\A\\|\\n\\)\"ms=s+1,me=e-1\n') + + f.write ('syn match lilyNote \"\\<\\(\\(\\(') + f.write (''.join ([w + '\\|' for w in note_names])) + f.write ('a\\)\\([,\']\\)\\{,4}\\([?!]\\)\\?\\)\\|s\\|r\\)\\(\\(128\\|64\\|32\\|16\\|8\\|4\\|2\\|1\\|\\\\breve\\|\\\\longa\\|\\\\maxima\\)[.]\\{,8}\\)\\?\\(\\A\\|\\n\\)\"me=e-1\n') diff --git a/scripts/build/lys-to-tely.py b/scripts/build/lys-to-tely.py new file mode 100644 index 0000000000..c9d698f92c --- /dev/null +++ b/scripts/build/lys-to-tely.py @@ -0,0 +1,125 @@ +#!@PYTHON@ + + +''' +TODO: + + * Add @nodes, split at sections? + +''' + + +import sys +import os +import getopt +import re + +program_name = 'lys-to-tely' + +include_snippets = '@lysnippets' +fragment_options = 'printfilename,texidoc' +help_text = r"""Usage: %(program_name)s [OPTIONS]... LY-FILE... +Construct tely doc from LY-FILEs. + +Options: + -h, --help print this help + -f, --fragment-options=OPTIONS use OPTIONS as lilypond-book fragment + options + -o, --output=NAME write tely doc to NAME + -t, --title=TITLE set tely doc title TITLE + --template=TEMPLATE use TEMPLATE as Texinfo template file, + instead of standard template; TEMPLATE should contain a command + '%(include_snippets)s' to tell where to insert LY-FILEs. When this + option is used, NAME and TITLE are ignored. 
+""" + +def help (text): + sys.stdout.write ( text) + sys.exit (0) + +(options, files) = getopt.getopt (sys.argv[1:], 'f:hn:t:', + ['fragment-options=', 'help', 'name=', 'title=', 'template=']) + +name = "ly-doc" +title = "Ly Doc" +template = '''\input texinfo +@setfilename %%(name)s.info +@settitle %%(title)s + +@documentencoding utf-8 +@iftex +@afourpaper +@end iftex + +@finalout @c we do not want black boxes. + +@c fool ls-latex +@ignore +@author Han-Wen Nienhuys and Jan Nieuwenhuizen +@title %%(title)s +@end ignore + +@node Top, , , (dir) +@top %%(title)s + +%s + +@bye +''' % include_snippets + +for opt in options: + o = opt[0] + a = opt[1] + if o == '-h' or o == '--help': + # We can't use vars () inside a function, as that only contains all + # local variables and none of the global variables! Thus we have to + # generate the help text here and pass it to the function... + help (help_text % vars ()) + elif o == '-n' or o == '--name': + name = a + elif o == '-t' or o == '--title': + title = a + elif o == '-f' or o == '--fragment-options': + fragment_options = a + elif o == '--template': + template = open (a, 'r').read () + else: + raise Exception ('unknown option: ' + o) + +texi_file_re = re.compile ('.*\.i?te(ly|xi)$') + +def name2line (n): + if texi_file_re.match (n): + # We have a texi include file, simply include it: + s = r"@include %s" % os.path.basename (n) + else: + # Assume it's a lilypond file -> create image etc. + s = r""" +@ifhtml +@html + +@end html +@end ifhtml + +@lilypondfile[%s]{%s} +""" % (os.path.basename (n), fragment_options, n) + return s + +if files: + dir = os.path.dirname (name) or "." +# don't strip .tely extension, input/lsr uses .itely + name = os.path.basename (name) + template = template % vars () + + s = "\n".join (map (name2line, files)) + s = template.replace (include_snippets, s, 1) + f = "%s/%s" % (dir, name) + sys.stderr.write ("%s: writing %s..." 
% (program_name, f)) + h = open (f, "w") + h.write (s) + h.close () + sys.stderr.write ('\n') +else: + # not Unix philosophy, but hey, at least we notice when + # we don't distribute any .ly files. + sys.stderr.write ("No files specified. Doing nothing") diff --git a/scripts/build/mass-link.py b/scripts/build/mass-link.py new file mode 100644 index 0000000000..17412e5559 --- /dev/null +++ b/scripts/build/mass-link.py @@ -0,0 +1,67 @@ +#!@PYTHON@ +# mass-link.py + +# USAGE: mass-link.py [--prepend-suffix SUFFIX] symbolic | hard SOURCEDIR DESTDIR FILES +# +# create hard or symbolic links to SOURCEDIR/FILES in DESTDIR +# +# If --prepend-suffix is specified, link to foo.bar will be called fooSUFFIX.bar. +# Shell wildcards expansion is performed on FILES. + +import sys +import os +import glob +import getopt + +print "mass-link.py" + +optlist, args = getopt.getopt (sys.argv[1:], '', ['prepend-suffix=']) +link_type, source_dir, dest_dir = args[0:3] +files = args[3:] + +source_dir = os.path.normpath (source_dir) +dest_dir = os.path.normpath (dest_dir) + +prepended_suffix = '' +for x in optlist: + if x[0] == '--prepend-suffix': + prepended_suffix = x[1] + +if prepended_suffix: + def insert_suffix (p): + l = p.split ('.') + if len (l) >= 2: + l[-2] += prepended_suffix + return '.'.join (l) + return p + prepended_suffix +else: + insert_suffix = lambda p: p + +if link_type == 'symbolic': + link = os.symlink +elif link_type == 'hard': + link = os.link +else: + sys.stderr.write(sys.argv[0] + ': ' + link_type + ": wrong argument, expected 'symbolic' or 'hard'\n") + sys.exit (1) + +sourcefiles = [] +for pattern in files: + sourcefiles += (glob.glob (os.path.join (source_dir, pattern))) + +def relative_path (f): + if source_dir == '.': + return f + return f[len (source_dir) + 1:] + +destfiles = [os.path.join (dest_dir, insert_suffix (relative_path (f))) for f in sourcefiles] + +destdirs = set ([os.path.dirname (dest) for dest in destfiles]) +[os.makedirs (d) for d in destdirs if 
not os.path.exists (d)] + +def force_link (src,dest): + if os.path.exists (dest): + os.system ('rm -f ' + dest) + link (src, dest) + +map (force_link, sourcefiles, destfiles) diff --git a/scripts/build/mf-to-table.py b/scripts/build/mf-to-table.py new file mode 100644 index 0000000000..2c1df75f7f --- /dev/null +++ b/scripts/build/mf-to-table.py @@ -0,0 +1,244 @@ +#!@PYTHON@ + +# mf-to-table.py -- convert spacing info in MF logs . +# +# source file of the GNU LilyPond music typesetter +# +# (c) 1997--2008 Han-Wen Nienhuys + +import os +import sys +import getopt +import re +import time + +def read_log_file (fn): + str = open (fn).read () + str = re.sub ('\n', '', str) + str = re.sub ('[\t ]+', ' ', str) + + deps = [] + autolines = [] + def include_func (match, d = deps): + d.append (match.group (1)) + return '' + + def auto_func (match, a = autolines): + a.append (match.group (1)) + return '' + + str = re.sub ('\\(([/a-z.A-Z_0-9-]+\\.mf)', include_func, str) + str = re.sub ('@{(.*?)@}', auto_func, str) + + return (autolines, deps) + + +class Char_metric: + def __init__ (self): + pass + +font_family = 'feta' + +def parse_logfile (fn): + autolines, deps = read_log_file (fn) + charmetrics = [] + + global_info = { + 'filename' : os.path.splitext (os.path.basename (fn))[0] + } + group = '' + + for l in autolines: + tags = l.split ('@:') + if tags[0] == 'group': + group = tags[1] + elif tags[0] == 'puorg': + group = '' + elif tags[0] == 'char': + name = tags[9] + + if group: + name = group + '.' 
+ name + m = { + 'description': tags[1], + 'name': name, + 'code': int (tags[2]), + 'breapth': float (tags[3]), + 'width': float (tags[4]), + 'depth': float (tags[5]), + 'height': float (tags[6]), + 'wx': float (tags[7]), + 'wy': float (tags[8]), + } + charmetrics.append (m) + elif tags[0] == 'font': + global font_family + font_family = (tags[3]) + # To omit 'GNU' (foundry) from font name proper: + # name = tags[2:] + #urg + if 0: # testing + tags.append ('Regular') + + encoding = re.sub (' ','-', tags[5]) + tags = tags[:-1] + name = tags[1:] + global_info['design_size'] = float (tags[4]) + global_info['FontName'] = '-'.join (name) + global_info['FullName'] = ' '.join (name) + global_info['FamilyName'] = '-'.join (name[1:-1]) + if 1: + global_info['Weight'] = tags[4] + else: # testing + global_info['Weight'] = tags[-1] + + global_info['FontBBox'] = '0 0 1000 1000' + global_info['Ascender'] = '0' + global_info['Descender'] = '0' + global_info['EncodingScheme'] = encoding + + elif tags[0] == 'parameter': + global_info[tags[1]] = tags[2]; + + return (global_info, charmetrics, deps) + + + +def character_lisp_table (global_info, charmetrics): + + def conv_char_metric (charmetric): + f = 1.0 + s = """(%s . +((bbox . (%f %f %f %f)) +(subfont . "%s") +(subfont-index . %d) +(attachment . (%f . %f)))) +""" %(charmetric['name'], + -charmetric['breapth'] * f, + -charmetric['depth'] * f, + charmetric['width'] * f, + charmetric['height'] * f, + global_info['filename'], + charmetric['code'], + charmetric['wx'], + charmetric['wy']) + + return s + + s = '' + for c in charmetrics: + s += conv_char_metric (c) + + return s + + +def global_lisp_table (global_info): + str = '' + + keys = ['staffsize', 'stafflinethickness', 'staff_space', + 'linethickness', 'black_notehead_width', 'ledgerlinethickness', + 'design_size', + 'blot_diameter' + ] + for k in keys: + if global_info.has_key (k): + str = str + "(%s . 
%s)\n" % (k,global_info[k]) + + return str + + +def ps_encoding (name, global_info, charmetrics): + encs = ['.notdef'] * 256 + for m in charmetrics: + encs[m['code']] = m['name'] + + + s = ('/%s [\n' % name) + for m in range (0, 256): + s += (' /%s %% %d\n' % (encs[m], m)) + s += ('] def\n') + return s + +def get_deps (deps, targets): + s = '' + for t in targets: + t = re.sub ( '^\\./', '', t) + s += ('%s '% t) + s += (": ") + for d in deps: + s += ('%s ' % d) + s += ('\n') + return s + +def help (): + sys.stdout.write(r"""Usage: mf-to-table [OPTIONS] LOGFILEs + +Generate feta metrics table from preparated feta log. + +Options: + -d, --dep=FILE print dependency info to FILE + -h, --help print this help + -l, --ly=FILE name output table + -o, --outdir=DIR prefix for dependency info + -p, --package=DIR specify package + + """) + sys.exit (0) + + +(options, files) = \ + getopt.getopt (sys.argv[1:], + 'a:d:ho:p:t:', + ['enc=', 'outdir=', 'dep=', 'lisp=', + 'global-lisp=', + 'debug', 'help', 'package=']) + +global_lisp_nm = '' +char_lisp_nm = '' +enc_nm = '' +depfile_nm = '' +lyfile_nm = '' +outdir_prefix = '.' 
+ +for opt in options: + o = opt[0] + a = opt[1] + if o == '--dep' or o == '-d': + depfile_nm = a + elif o == '--outdir' or o == '-o': + outdir_prefix = a + elif o == '--lisp': + char_lisp_nm = a + elif o == '--global-lisp': + global_lisp_nm = a + elif o == '--enc': + enc_nm = a + elif o== '--help' or o == '-h': + help() + elif o == '--debug': + debug_b = 1 + else: + print o + raise getopt.error + +base = os.path.splitext (lyfile_nm)[0] + +for filenm in files: + (g, m, deps) = parse_logfile (filenm) + + enc_name = 'FetaEncoding' + if re.search ('parmesan', filenm): + enc_name = 'ParmesanEncoding' + elif re.search ('feta-brace', filenm): + enc_name = 'FetaBraceEncoding' + elif re.search ('feta-alphabet', filenm): + enc_name = 'FetaAlphabetEncoding'; + + open (enc_nm, 'w').write (ps_encoding (enc_name, g, m)) + open (char_lisp_nm, 'w').write (character_lisp_table (g, m)) + open (global_lisp_nm, 'w').write (global_lisp_table (g)) + if depfile_nm: + open (depfile_nm, 'wb').write (get_deps (deps, + [base + '.log', base + '.dvi', base + '.pfa', + depfile_nm, + base + '.pfb'])) diff --git a/scripts/build/mf2pt1.pl b/scripts/build/mf2pt1.pl new file mode 100644 index 0000000000..95df0f8206 --- /dev/null +++ b/scripts/build/mf2pt1.pl @@ -0,0 +1,1090 @@ +#!@PERL@ + +################################################## +# Convert stylized Metafont to PostScript Type 1 # +# By Scott Pakin # +################################################## + +######################################################################## +# mf2pt1 # +# Copyright (C) 2008 Scott Pakin # +# # +# This program may be distributed and/or modified under the conditions # +# of the LaTeX Project Public License, either version 1.3c of this # +# license or (at your option) any later version. # +# # +# The latest version of this license is in: # +# # +# http://www.latex-project.org/lppl.txt # +# # +# and version 1.3c or later is part of all distributions of LaTeX # +# version 2006/05/20 or later. 
# +######################################################################## + +our $VERSION = "2.4.4"; # mf2pt1 version number +require 5.6.1; # I haven't tested mf2pt1 with older Perl versions + +use File::Basename; +use File::Spec; +use Getopt::Long; +use Pod::Usage; +use Math::Trig; +use warnings; +use strict; + +# Define some common encoding vectors. +my @standardencoding = + ((map {"_a$_"} (0..31)), + qw (space exclam quotedbl numbersign dollar percent ampersand + quoteright parenleft parenright asterisk plus comma hyphen + period slash zero one two three four five six seven eight + nine colon semicolon less equal greater question at A B C D E + F G H I J K L M N O P Q R S T U V W X Y Z bracketleft + backslash bracketright asciicircum underscore quoteleft a b c + d e f g h i j k l m n o p q r s t u v w x y z braceleft bar + braceright asciitilde), + (map {"_a$_"} (127..160)), + qw (exclamdown cent sterling fraction yen florin section currency + quotesingle quotedblleft guillemotleft guilsinglleft + guilsinglright fi fl _a176 endash dagger daggerdbl + periodcentered _a181 paragraph bullet quotesinglbase + quotedblbase quotedblright guillemotright ellipsis + perthousand _a190 questiondown _a192 grave acute circumflex + tilde macron breve dotaccent dieresis _a201 ring cedilla + _a204 hungarumlaut ogonek caron emdash), + (map {"_a$_"} (209..224)), + qw (AE _a226 ordfeminine _a228 _a229 _a230 _a231 Lslash Oslash OE + ordmasculine _a236 _a237 _a238 _a239 _a240 ae _a242 _a243 + _a244 dotlessi _a246 _a247 lslash oslash oe germandbls _a252 + _a253 _a254 _a255)); +my @isolatin1encoding = + ((map {"_a$_"} (0..31)), + qw (space exclam quotedbl numbersign dollar percent ampersand + quoteright parenleft parenright asterisk plus comma minus + period slash zero one two three four five six seven eight + nine colon semicolon less equal greater question at A B C D E + F G H I J K L M N O P Q R S T U V W X Y Z bracketleft + backslash bracketright asciicircum underscore quoteleft 
a b c + d e f g h i j k l m n o p q r s t u v w x y z braceleft bar + braceright asciitilde), + (map {"_a$_"} (128..143)), + qw (dotlessi grave acute circumflex tilde macron breve dotaccent + dieresis _a153 ring cedilla _a156 hungarumlaut ogonek + caron space exclamdown cent sterling currency yen brokenbar + section dieresis copyright ordfeminine guillemotleft + logicalnot hyphen registered macron degree plusminus + twosuperior threesuperior acute mu paragraph periodcentered + cedilla onesuperior ordmasculine guillemotright onequarter + onehalf threequarters questiondown Agrave Aacute Acircumflex + Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex + Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde + Ograve Oacute Ocircumflex Otilde Odieresis multiply Oslash + Ugrave Uacute Ucircumflex Udieresis Yacute Thorn germandbls + agrave aacute acircumflex atilde adieresis aring ae ccedilla + egrave eacute ecircumflex edieresis igrave iacute icircumflex + idieresis eth ntilde ograve oacute ocircumflex otilde + odieresis divide oslash ugrave uacute ucircumflex udieresis + yacute thorn ydieresis)); +my @ot1encoding = + qw (Gamma Delta Theta Lambda Xi Pi Sigma Upsilon Phi + Psi Omega ff fi fl ffi ffl dotlessi dotlessj grave acute caron + breve macron ring cedilla germandbls ae oe oslash AE OE Oslash + suppress exclam quotedblright numbersign dollar percent + ampersand quoteright parenleft parenright asterisk plus comma + hyphen period slash zero one two three four five six seven + eight nine colon semicolon exclamdown equal questiondown + question at A B C D E F G H I J K L M N O P Q R S T U V W X Y + Z bracketleft quotedblleft bracketright circumflex dotaccent + quoteleft a b c d e f g h i j k l m n o p q r s t u v w x y z + endash emdash hungarumlaut tilde dieresis); +my @t1encoding = + qw (grave acute circumflex tilde dieresis hungarumlaut ring caron + breve macron dotaccent cedilla ogonek quotesinglbase + guilsinglleft guilsinglright quotedblleft 
quotedblright + quotedblbase guillemotleft guillemotright endash emdash cwm + perthousand dotlessi dotlessj ff fi fl ffi ffl space exclam + quotedbl numbersign dollar percent ampersand quoteright + parenleft parenright asterisk plus comma hyphen period slash + zero one two three four five six seven eight nine colon + semicolon less equal greater question at A B C D E F G H I J K L + M N O P Q R S T U V W X Y Z bracketleft backslash bracketright + asciicircum underscore quoteleft a b c d e f g h i j k l m n o p + q r s t u v w x y z braceleft bar braceright asciitilde + sfthyphen Abreve Aogonek Cacute Ccaron Dcaron Ecaron Eogonek + Gbreve Lacute Lcaron Lslash Nacute Ncaron Eng Ohungarumlaut + Racute Rcaron Sacute Scaron Scedilla Tcaron Tcedilla + Uhungarumlaut Uring Ydieresis Zacute Zcaron Zdotaccent IJ + Idotaccent dcroat section abreve aogonek cacute ccaron dcaron + ecaron eogonek gbreve lacute lcaron lslash nacute ncaron eng + ohungarumlaut racute rcaron sacute scaron scedilla tcaron + tcedilla uhungarumlaut uring ydieresis zacute zcaron zdotaccent + ij exclamdown questiondown sterling Agrave Aacute Acircumflex + Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex + Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde Ograve + Oacute Ocircumflex Otilde Odieresis OE Oslash Ugrave Uacute + Ucircumflex Udieresis Yacute Thorn SS agrave aacute acircumflex + atilde adieresis aring ae ccedilla egrave eacute ecircumflex + edieresis igrave iacute icircumflex idieresis eth ntilde ograve + oacute ocircumflex otilde odieresis oe oslash ugrave uacute + ucircumflex udieresis yacute thorn germandbls); + +# Define font parameters that the user can override. 
+my $fontversion; +my $creationdate; +my $comment; +my $familyname; +my $weight; +my $fullname; +my $fixedpitch; +my $italicangle; +my $underlinepos; +my $underlinethick; +my $fontname; +my $uniqueID; +my $designsize; +my ($mffile, $pt1file, $pfbfile, $ffscript); +my $encoding; +my $rounding; +my $bpppix; + +# Define all of our other global variables. +my $progname = basename $0, ".pl"; +my $mag; +my @fontbbox; +my @charbbox; +my @charwd; +my @glyphname; +my @charfiles; +my $filebase; +my $filedir; +my $filenoext; +my $versionmsg = "mf2pt1 version $VERSION + +Copyright (C) 2008 Scott Pakin + +This program may be distributed and/or modified under the conditions +of the LaTeX Project Public License, either version 1.3c of this +license or (at your option) any later version. + +The latest version of this license is in: + + http://www.latex-project.org/lppl.txt + +and version 1.3c or later is part of all distributions of LaTeX +version 2006/05/20 or later. +"; + + +###################################################################### + +# The routines to compute the fractional approximation of a real number +# are heavily based on code posted by Ben Tilly +# on Nov 16th, 2000, to the +# PerlMonks list. See . + + +# Takes numerator/denominator pairs. +# Returns a PS fraction string representation (with a trailing space). +sub frac_string (@) +{ + my $res = ""; + + while (@_) { + my $n = shift; + my $d = shift; + $res .= $n . " "; + $res .= $d . " div " if $d > 1; + } + + return $res; +} + + +# Takes a number. +# Returns a numerator and denominator with the smallest denominator +# so that the difference of the resulting fraction to the number is +# smaller or equal to $rounding. 
+sub frac_approx ($) +{ + my $num = shift; + my $f = ret_frac_iter ($num); + + while (1) { + my ($n, $m) = $f->(); + my $approx = $n / $m; + my $delta = abs ($num - $approx); + return ($n, $m) if ($delta <= $rounding); + } +} + + +# Takes a number, returns the best integer approximation and (in list +# context) the error. +sub best_int ($) +{ + my $x = shift; + my $approx = sprintf '%.0f', $x; + if (wantarray) { + return ($approx, $x - $approx); + } + else { + return $approx; + } +} + + +# Takes a numerator and denominator, in scalar context returns +# the best fraction describing them, in list the numerator and +# denominator. +sub frac_standard ($$) +{ + my $n = best_int(shift); + my $m = best_int(shift); + my $k = gcd($n, $m); + $n /= $k; + $m /= $k; + if ($m < 0) { + $n *= -1; + $m *= -1; + } + if (wantarray) { + return ($n, $m); + } + else { + return "$n/$m"; + } +} + + +# Euclidean algorithm for calculating a GCD. +# Takes two integers, returns the greatest common divisor. +sub gcd ($$) +{ + my ($n, $m) = @_; + while ($m) { + my $k = $n % $m; + ($n, $m) = ($m, $k); + } + return $n; +} + + +# Takes a list of terms in a continued fraction, and converts it +# into a fraction. +sub ints_to_frac (@) +{ + my ($n, $m) = (0, 1); # Start with 0 + while (@_) { + my $k = pop; + if ($n) { + # Want frac for $k + 1/($n/$m) + ($n, $m) = frac_standard($k*$n + $m, $n); + } + else { + # Want $k + ($n, $m) = frac_standard($k, 1); + } + } + return frac_standard($n, $m); +} + + +# Takes a number, returns an anon sub which iterates through a set of +# fractional approximations that converges very quickly to the number. +sub ret_frac_iter ($) +{ + my $x = shift; + my $term_iter = ret_next_term_iter($x); + my @ints; + return sub { + push @ints, $term_iter->(); + return ints_to_frac(@ints); + } +} + + +# Terms of a continued fraction converging on that number. 
+sub ret_next_term_iter ($) +{ + my $x = shift; + return sub { + (my $n, $x) = best_int($x); + if (0 != $x) { + $x = 1/$x; + } + return $n; + } +} + +###################################################################### + +# Round a number to the nearest integer. +sub round ($) +{ + return int($_[0] + 0.5*($_[0] <=> 0)); +} + + +# Round a number to a given precision. +sub prec ($) +{ + return round ($_[0] / $rounding) * $rounding; +} + + +# Set a variable's value to the first defined value in the given list. +# If the variable was not previously defined and no value in the list +# is defined, do nothing. +sub assign_default (\$@) +{ + my $varptr = shift; # Pointer to variable to define + return if defined $$varptr && $$varptr ne "UNSPECIFIED"; + foreach my $val (@_) { + next if !defined $val; + $$varptr = $val; + return; + } +} + + +# Print and execute a shell command. An environment variable with the +# same name as the command overrides the command name. Return 1 on +# success, 0 on failure. Optionally abort if the command fails, based +# on the first argument to execute_command. +sub execute_command ($@) +{ + my $abort_on_failure = shift; + my @command = @_; + $command[0] = $ENV{uc $command[0]} || $command[0]; + my $prettyargs = join (" ", map {/[\\ ]/ ? "'$_'" : $_} @command); + print "Invoking \"$prettyargs\"...\n"; + my $result = system @command; + die "${progname}: \"$prettyargs\" failed ($!)\n" if $result && $abort_on_failure; + return !$result; +} + + +# Output the font header. +sub output_header () +{ + # Show the initial boilerplate. + print OUTFILE <<"ENDHEADER"; +%!FontType1-1.0: $fontname $fontversion +%%CreationDate: $creationdate +% Font converted to Type 1 by mf2pt1, written by Scott Pakin. 
+11 dict begin +/FontInfo 11 dict dup begin +/version ($fontversion) readonly def +/Notice ($comment) readonly def +/FullName ($fullname) readonly def +/FamilyName ($familyname) readonly def +/Weight ($weight) readonly def +/ItalicAngle $italicangle def +/isFixedPitch $fixedpitch def +/UnderlinePosition $underlinepos def +/UnderlineThickness $underlinethick def +end readonly def +/FontName /$fontname def +ENDHEADER + + # If we're not using an encoding that PostScript knows about, then + # create an encoding vector. + if ($encoding==\@standardencoding) { + print OUTFILE "/Encoding StandardEncoding def\n"; + } + else { + print OUTFILE "/Encoding 256 array\n"; + print OUTFILE "0 1 255 {1 index exch /.notdef put} for\n"; + foreach my $charnum (0 .. $#{$encoding}) { + if ($encoding->[$charnum] && $encoding->[$charnum]!~/^_a\d+$/) { + print OUTFILE "dup $charnum /$encoding->[$charnum] put\n"; + } + } + print OUTFILE "readonly def\n"; + } + + # Show the final boilerplate. + print OUTFILE <<"ENDHEADER"; +/PaintType 0 def +/FontType 1 def +/FontMatrix [0.001 0 0 0.001 0 0] readonly def +/UniqueID $uniqueID def +/FontBBox{@fontbbox}readonly def +currentdict end +currentfile eexec +dup /Private 5 dict dup begin +/RD{string currentfile exch readstring pop}executeonly def +/ND{noaccess def}executeonly def +/NP{noaccess put}executeonly def +ENDHEADER +} + + +# Use MetaPost to generate one PostScript file per character. We +# calculate the font bounding box from these characters and store them +# in @fontbbox. If the input parameter is 1, set other font +# parameters, too. +sub get_bboxes ($) +{ + execute_command 1, ("mpost", "-mem=mf2pt1", "-progname=mpost", + "\\mode:=localfont; mag:=$mag; bpppix $bpppix; input $mffile"); + opendir (CURDIR, ".") || die "${progname}: $! 
($filedir)\n"; + @charfiles = sort + { ($a=~ /\.(\d+)$/)[0] <=> ($b=~ /\.(\d+)$/)[0] } + grep /^$filebase.*\.\d+$/, readdir(CURDIR); + close CURDIR; + @fontbbox = (1000000, 1000000, -1000000, -1000000); + foreach my $psfile (@charfiles) { + # Read the character number from the output file's extension. + $psfile =~ /\.(\d+)$/; + my $charnum = $1; + + # Process in turn each line of the current PostScript file. + my $havebbox = 0; + open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n"; + while () { + my @tokens = split " "; + if ($tokens[0] eq "%%BoundingBox:") { + # Store the MetaPost-produced bounding box, just in case + # the given font doesn't use beginchar. + @tokens = ("%", "MF2PT1:", "glyph_dimensions", @tokens[1..4]); + $havebbox--; + } + next if $#tokens<1 || $tokens[1] ne "MF2PT1:"; + + # Process a "special" inserted into the generated PostScript. + MF2PT1_CMD: + { + # glyph_dimensions llx lly urx ury -- specified glyph dimensions + $tokens[2] eq "glyph_dimensions" && do { + my @bbox = @tokens[3..6]; + $fontbbox[0]=$bbox[0] if $bbox[0]<$fontbbox[0]; + $fontbbox[1]=$bbox[1] if $bbox[1]<$fontbbox[1]; + $fontbbox[2]=$bbox[2] if $bbox[2]>$fontbbox[2]; + $fontbbox[3]=$bbox[3] if $bbox[3]>$fontbbox[3]; + $charbbox[$charnum] = \@bbox; + $havebbox++; + last MF2PT1_CMD; + }; + + # If all we want is the bounding box, exit the loop now. 
+ last MF2PT1_CMD if !$_[0]; + + # glyph_name name -- glyph name + $tokens[2] eq "glyph_name" && do { + $glyphname[$charnum] = $tokens[3]; + last MF2PT1_CMD; + }; + + # charwd wd -- character width as in TFM + $tokens[2] eq "charwd" && do { + $charwd[$charnum] = $tokens[3]; + last MF2PT1_CMD; + }; + + # font_identifier name -- full font name + $tokens[2] eq "font_identifier" && do { + $fullname = $tokens[3]; + last MF2PT1_CMD; + }; + + # font_size number -- font design size (pt, not bp) + $tokens[2] eq "font_size" && $tokens[3] && do { + $designsize = $tokens[3] * 72 / 72.27; + last MF2PT1_CMD; + }; + + # font_slant number -- italic amount + $tokens[2] eq "font_slant" && do { + $italicangle = 0 + rad2deg (atan(-$tokens[3])); + last MF2PT1_CMD; + }; + + # font_coding_scheme string -- font encoding + $tokens[2] eq "font_coding_scheme" && do { + $encoding = $tokens[3]; + last MF2PT1_CMD; + }; + + # font_version string -- font version number (xxx.yyy) + $tokens[2] eq "font_version" && do { + $fontversion = $tokens[3]; + last MF2PT1_CMD; + }; + + # font_comment string -- font comment notice + $tokens[2] eq "font_comment" && do { + $comment = join (" ", @tokens[3..$#tokens]); + last MF2PT1_CMD; + }; + + # font_family string -- font family name + $tokens[2] eq "font_family" && do { + $familyname = $tokens[3]; + last MF2PT1_CMD; + }; + + # font_weight string -- font weight (e.g., "Book" or "Heavy") + $tokens[2] eq "font_weight" && do { + $weight = $tokens[3]; + last MF2PT1_CMD; + }; + + # font_fixed_pitch number -- fixed width font (0=false, 1=true) + $tokens[2] eq "font_fixed_pitch" && do { + $fixedpitch = $tokens[3]; + last MF2PT1_CMD; + }; + + # font_underline_position number -- vertical underline position + $tokens[2] eq "font_underline_position" && do { + # We store $underlinepos in points and later + # scale it by 1000/$designsize. 
+ $underlinepos = $tokens[3]; + last MF2PT1_CMD; + }; + + # font_underline_thickness number -- thickness of underline + $tokens[2] eq "font_underline_thickness" && do { + # We store $underlinethick in points and later + # scale it by 1000/$designsize. + $underlinethick = $tokens[3]; + last MF2PT1_CMD; + }; + + # font_name string -- font name + $tokens[2] eq "font_name" && do { + $fontname = $tokens[3]; + last MF2PT1_CMD; + }; + + # font_unique_id number (as string) -- globally unique font ID + $tokens[2] eq "font_unique_id" && do { + $uniqueID = 0+$tokens[3]; + last MF2PT1_CMD; + }; + } + } + close PSFILE; + if (!$havebbox) { + warn "${progname}: No beginchar in character $charnum; glyph dimensions are probably incorrect\n"; + } + } +} + + +# Convert ordinary, MetaPost-produced PostScript files into Type 1 +# font programs. +sub output_font_programs () +{ + # Iterate over all the characters. We convert each one, line by + # line and token by token. + print "Converting PostScript graphics to Type 1 font programs...\n"; + foreach my $psfile (@charfiles) { + # Initialize the font program. + $psfile =~ /\.(\d+)$/; + my $charnum = $1; + my $gname = $glyphname[$charnum] || $encoding->[$charnum]; + my @fontprog; + push @fontprog, ("/$gname {", + frac_string (frac_approx ($charbbox[$charnum]->[0]), + frac_approx ($charwd[$charnum] * $mag)) + . "hsbw"); + my ($cpx, $cpy) = + ($charbbox[$charnum]->[0], 0); # Current point (PostScript) + + # Iterate over every line in the current file. + open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n"; + while (my $oneline=) { + next if $oneline=~/^\%/; + next if $oneline=~/set/; # Fortunately, "set" never occurs on "good" lines. + my @arglist; # Arguments to current PostScript function + + # Iterate over every token in the current line. + TOKENLOOP: + foreach my $token (split " ", $oneline) { + # Number: Round and push on the argument list. 
+ $token =~ /^[-.\d]+$/ && do { + push @arglist, prec ($&); + next TOKENLOOP; + }; + + # curveto: Convert to vhcurveto, hvcurveto, or rrcurveto. + $token eq "curveto" && do { + my ($dx1, $dy1) = ($arglist[0] - $cpx, + $arglist[1] - $cpy); + my ($dx1n, $dx1d) = frac_approx ($dx1); + my ($dy1n, $dy1d) = frac_approx ($dy1); + $cpx += $dx1n / $dx1d; + $cpy += $dy1n / $dy1d; + + my ($dx2, $dy2) = ($arglist[2] - $cpx, + $arglist[3] - $cpy); + my ($dx2n, $dx2d) = frac_approx ($dx2); + my ($dy2n, $dy2d) = frac_approx ($dy2); + $cpx += $dx2n / $dx2d; + $cpy += $dy2n / $dy2d; + + my ($dx3, $dy3) = ($arglist[4] - $cpx, + $arglist[5] - $cpy); + my ($dx3n, $dx3d) = frac_approx ($dx3); + my ($dy3n, $dy3d) = frac_approx ($dy3); + $cpx += $dx3n / $dx3d; + $cpy += $dy3n / $dy3d; + + if (!$dx1n && !$dy3n) { + push @fontprog, frac_string ($dy1n, $dy1d, + $dx2n, $dx2d, + $dy2n, $dy2d, + $dx3n, $dx3d) + . "vhcurveto"; + } + elsif (!$dy1n && !$dx3n) { + push @fontprog, frac_string ($dx1n, $dx1d, + $dx2n, $dx2d, + $dy2n, $dy2d, + $dy3n, $dy3d) + . "hvcurveto"; + } + else { + push @fontprog, frac_string ($dx1n, $dx1d, + $dy1n, $dy1d, + $dx2n, $dx2d, + $dy2n, $dy2d, + $dx3n, $dx3d, + $dy3n, $dy3d) + . "rrcurveto"; + } + next TOKENLOOP; + }; + + # lineto: Convert to vlineto, hlineto, or rlineto. + $token eq "lineto" && do { + my ($dx, $dy) = ($arglist[0] - $cpx, + $arglist[1] - $cpy); + my ($dxn, $dxd) = frac_approx ($dx); + my ($dyn, $dyd) = frac_approx ($dy); + $cpx += $dxn / $dxd; + $cpy += $dyn / $dyd; + + if (!$dxn) { + push @fontprog, frac_string ($dyn, $dyd) + . "vlineto" if $dyn; + } + elsif (!$dyn) { + push @fontprog, frac_string ($dxn, $dxd) + . "hlineto"; + } + else { + push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd) + . "rlineto"; + } + next TOKENLOOP; + }; + + # moveto: Convert to vmoveto, hmoveto, or rmoveto. 
+ $token eq "moveto" && do { + my ($dx, $dy) = ($arglist[0] - $cpx, + $arglist[1] - $cpy); + my ($dxn, $dxd) = frac_approx ($dx); + my ($dyn, $dyd) = frac_approx ($dy); + $cpx += $dxn / $dxd; + $cpy += $dyn / $dyd; + + if (!$dxn) { + push @fontprog, frac_string ($dyn, $dyd) + . "vmoveto"; + } + elsif (!$dyn) { + push @fontprog, frac_string ($dxn, $dxd) + . "hmoveto"; + } + else { + push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd) + . "rmoveto"; + } + next TOKENLOOP; + }; + + # closepath: Output as is. + $token eq "closepath" && do { + push @fontprog, $token; + next TOKENLOOP; + }; + } + } + close PSFILE; + push @fontprog, ("endchar", + "} ND"); + print OUTFILE join ("\n\t", @fontprog), "\n"; + } +} + + +# Output the final set of code for the Type 1 font. +sub output_trailer () +{ + print OUTFILE <<"ENDTRAILER"; +/.notdef { + 0 @{[$fontbbox[2]-$fontbbox[0]]} hsbw + endchar + } ND +end +end +readonly put +noaccess put +dup/FontName get exch definefont pop +mark currentfile closefile +cleartomark +ENDTRAILER +} + +###################################################################### + +# Parse the command line. Asterisks in the following represents +# commands also defined by Plain Metafont. 
+my %opthash = (); +GetOptions (\%opthash, + "fontversion=s", # font_version + "comment=s", # font_comment + "family=s", # font_family + "weight=s", # font_weight + "fullname=s", # font_identifier (*) + "fixedpitch!", # font_fixed_pitch + "italicangle=f", # font_slant (*) + "underpos=f", # font_underline_position + "underthick=f", # font_underline_thickness + "name=s", # font_name + "uniqueid=i", # font_unique_id + "designsize=f", # font_size (*) + "encoding=s", # font_coding_scheme (*) + "rounding=f", + "bpppix=f", + "ffscript=s", + "h|help", + "V|version") || pod2usage(2); +if (defined $opthash{"h"}) { + pod2usage(-verbose => 1, + -output => \*STDOUT, # Bug workaround for Pod::Usage + -exitval => "NOEXIT"); + print "Please e-mail bug reports to scott+mf\@pakin.org.\n"; + exit 1; +} +do {print $versionmsg; exit 1} if defined $opthash{"V"}; +pod2usage(2) if $#ARGV != 0; + +# Extract the filename from the command line. +$mffile = $ARGV[0]; +my @fileparts = fileparse $mffile, ".mf"; +$filebase = $fileparts[0]; +$filedir = $fileparts[1]; +$filenoext = File::Spec->catfile ($filedir, $filebase); +$pt1file = $filebase . ".pt1"; +$pfbfile = $filebase . ".pfb"; + +assign_default $bpppix, $opthash{bpppix}, 0.02; + +# Make our first pass through the input, to set values for various options. +$mag = 100; # Get a more precise bounding box. +get_bboxes(1); # This might set $designsize. + +# Sanity-check the specified precision. +assign_default $rounding, $opthash{rounding}, 1; +if ($rounding<=0.0 || $rounding>1.0) { + die sprintf "%s: Invalid rounding amount \"%g\"; value must be a positive number no greater than 1.0\n", $progname, $rounding; +} + +# Ensure that every user-definable parameter is assigned a value. 
+assign_default $fontversion, $opthash{fontversion}, "001.000"; +assign_default $creationdate, scalar localtime; +assign_default $comment, $opthash{comment}, "Font converted to Type 1 by mf2pt1, written by Scott Pakin."; +assign_default $weight, $opthash{weight}, "Medium"; +assign_default $fixedpitch, $opthash{fixedpitch}, 0; +assign_default $uniqueID, $opthash{uniqueid}, int(rand(1000000)) + 4000000; +assign_default $designsize, $opthash{designsize}; +die "${progname}: a design size must be specified in $mffile or on the command line\n" if !defined $designsize; +die "${progname}: the design size must be a positive number\n" if $designsize<=0.0; +assign_default $underlinepos, $opthash{underpos}, -1; +$underlinepos = round(1000*$underlinepos/$designsize); +assign_default $underlinethick, $opthash{underthick}, 0.5; +$underlinethick = round(1000*$underlinethick/$designsize); +assign_default $fullname, $opthash{fullname}, $filebase; +assign_default $familyname, $opthash{family}, $fullname; +assign_default $italicangle, $opthash{italicangle}, 0; +assign_default $fontname, $opthash{name}, "$familyname-$weight"; +$fontname =~ s/\s//g; +assign_default $encoding, $opthash{encoding}, "standard"; +my $encoding_name = $encoding; +ENCODING: +{ + if (-e $encoding) { + # Filenames take precedence over built-in encodings. + my @enc_array; + open (ENCFILE, "<$encoding") || die "${progname}: $! 
($encoding)\n"; + while (my $oneline = ) { + $oneline =~ s/\%.*$//; + foreach my $word (split " ", $oneline) { + push @enc_array, substr($word, 1) if substr($word, 0, 1) eq "/"; + } + } + close ENCFILE; + $encoding_name = substr (shift @enc_array, 1); + $encoding = \@enc_array; + last ENCODING; + } + $encoding=\@standardencoding, last ENCODING if $encoding eq "standard"; + $encoding=\@isolatin1encoding, last ENCODING if $encoding eq "isolatin1"; + $encoding=\@ot1encoding, last ENCODING if $encoding eq "ot1"; + $encoding=\@t1encoding, last ENCODING if $encoding eq "t1"; + $encoding=\@glyphname, last ENCODING if $encoding eq "asis"; + warn "${progname}: Unknown encoding \"$encoding\"; using standard Adobe encoding\n"; + $encoding=\@standardencoding; # Default to standard encoding +} +assign_default $fixedpitch, $opthash{fixedpitch}, 0; +$fixedpitch = $fixedpitch ? "true" : "false"; +assign_default $ffscript, $opthash{ffscript}; + +# Output the final values of all of our parameters. +print "\n"; +print <<"PARAMVALUES"; +mf2pt1 is using the following font parameters: + font_version: $fontversion + font_comment: $comment + font_family: $familyname + font_weight: $weight + font_identifier: $fullname + font_fixed_pitch: $fixedpitch + font_slant: $italicangle + font_underline_position: $underlinepos + font_underline_thickness: $underlinethick + font_name: $fontname + font_unique_id: $uniqueID + font_size: $designsize (bp) + font_coding_scheme: $encoding_name +PARAMVALUES + ; +print "\n"; + +# Scale by a factor of 1000/design size. +$mag = 1000.0 / $designsize; +get_bboxes(0); +print "\n"; + +# Output the font in disassembled format. +open (OUTFILE, ">$pt1file") || die "${progname}: $! 
($pt1file)\n"; +output_header(); +printf OUTFILE "2 index /CharStrings %d dict dup begin\n", + 1+scalar(grep {defined($_)} @charbbox); +output_font_programs(); +output_trailer(); +close OUTFILE; +unlink @charfiles; +print "\n"; + +# Convert from the disassembled font format to Type 1 binary format. +if (!execute_command 0, ("t1asm", $pt1file, $pfbfile)) { + die "${progname}: You'll need either to install t1utils and rerun $progname or find another way to convert $pt1file to $pfbfile\n"; + exit 1; +} +print "\n"; +unlink $pt1file; + +# Use FontForge to autohint the result. +my $user_script = 0; # 1=script file was provided by the user; 0=created here +if (defined $ffscript) { + # The user provided his own script. + $user_script = 1; +} +else { + # Create a FontForge script file. + $ffscript = $filebase . ".pe"; + open (FFSCRIPT, ">$ffscript") || die "${progname}: $! ($ffscript)\n"; + print FFSCRIPT <<'AUTOHINT'; +Open($1); +SelectAll(); +RemoveOverlap(); +AddExtrema(); +Simplify(0, 2); +CorrectDirection(); +Simplify(0, 2); +RoundToInt(); +AutoHint(); +Generate($1); +Quit(0); +AUTOHINT + ; + close FFSCRIPT; +} +if (!execute_command 0, ("fontforge", "-script", $ffscript, $pfbfile)) { + warn "${progname}: You'll need to install FontForge if you want $pfbfile autohinted (not required, but strongly recommended)\n"; +} +unlink $ffscript if !$user_script; +print "\n"; + +# Finish up. +print "*** Successfully generated $pfbfile! 
***\n"; +exit 0; + +###################################################################### + +__END__ + +=head1 NAME + +mf2pt1 - produce a PostScript Type 1 font program from a Metafont source + + +=head1 SYNOPSIS + +mf2pt1 +[B<--help>] +[B<--version>] +[B<--comment>=I] +[B<--designsize>=I] +[B<--encoding>=I] +[B<--family>=I] +[B<-->[B]B] +[B<--fontversion>=I] +[B<--fullname>=I] +[B<--italicangle>=I] +[B<--name>=I] +[B<--underpos>=I] +[B<--underthick>=I] +[B<--uniqueid>=I] +[B<--weight>=I] +[B<--rounding>=I] +[B<--bpppix>=I] +[B<--ffscript>=I] +I.mf + + +=head1 WARNING + +The B Info file is the main source of documentation for +B. This man page is merely a brief summary. + + +=head1 DESCRIPTION + +B facilitates producing PostScript Type 1 fonts from a +Metafont source file. It is I, as the name may imply, an +automatic converter of arbitrary Metafont fonts to Type 1 format. +B imposes a number of restrictions on the Metafont input. If +these restrictions are met, B will produce valid Type 1 +output. (Actually, it produces "disassembled" Type 1; the B +program from the B suite will convert this to a true Type 1 +font.) + +=head2 Usage + + mf2pt1 myfont.mf + +=head1 OPTIONS + +Font parameters are best specified within a Metafont program. If +necessary, though, command-line options can override any of these +parameters. The B Info page, the primary source of B +documentation, describes the following in greater detail. + +=over 4 + +=item B<--help> + +Provide help on B's command-line options. + +=item B<--version> + +Output the B version number, copyright, and license. + +=item B<--comment>=I + +Include a font comment, usually a copyright notice. + +=item B<--designsize>=I + +Specify the font design size in points. + +=item B<--encoding>=I + +Designate the font encoding, either the name of a---typically +F<.enc>---file which contains a PostScript font-encoding vector or one +of C (the default), C, C, or C. + +=item B<--family>=I + +Specify the font family. 
+ +=item B<--fixedpitch>, B<--nofixedpitch> + +Assert that the font uses either monospaced (B<--fixedpitch>) or +proportional (B<--nofixedpitch>) character widths. + +=item B<--fontversion>=I + +Specify the font's major and minor version number. + +=item B<--fullname>=I + +Designate the full font name (family plus modifiers). + +=item B<--italicangle>=I + +Designate the italic angle in degrees counterclockwise from vertical. + +=item B<--name>=I + +Provide the font name. + +=item B<--underpos>=I + +Specify the vertical position of the underline in thousandths of the +font height. + +=item B<--underthick>=I + +Specify the thickness of the underline in thousandths of the font +height. + +=item B<--uniqueid>=I + +Specify a globally unique font identifier. + +=item B<--weight>=I + +Provide a description of the font weight (e.g., ``Heavy''). + +=item B<--rounding>=I + +Specify the fraction of a font unit (0.0 < I <= 1.0) to which +to round coordinate values [default: 1.0]. + +=item B<--bpppix>=I + +Redefine the number of big points per pixel from 0.02 to I. + +=item B<--ffscript>=I + +Name a script to pass to FontForge. + +=back + + +=head1 FILES + +F (which is generated from F and F) + + +=head1 NOTES + +As stated in L, the complete source of documentation for +B is the Info page, not this man page. + + +=head1 SEE ALSO + +mf(1), mpost(1), t1asm(1), fontforge(1) + + +=head1 AUTHOR + +Scott Pakin, I diff --git a/scripts/build/mutopia-index.py b/scripts/build/mutopia-index.py new file mode 100644 index 0000000000..31fa6ac492 --- /dev/null +++ b/scripts/build/mutopia-index.py @@ -0,0 +1,197 @@ +#!@PYTHON@ +# mutopia-index.py + +import fnmatch +import getopt +import os +import re +import stat +import sys + +def find (pat, dir): + f = os.popen ('find %s -name "%s"'% (dir, pat)) + lst = [] + for a in f.readlines(): + a = a[:-1] + lst.append (a) + return lst + + +junk_prefix = 'out-www/' + +headertext= r""" + +

      LilyPond samples

      + + +

      You are looking at a page with some LilyPond samples. These files +are also included in the distribution. The output is completely +generated from the source file, without any further touch up. + +

      + +The pictures are 90 dpi anti-aliased snapshots of the printed output. +For a good impression of the quality print out the PDF file. +""" + +headertext_nopics= r""" +

      No examples were found in this directory. +""" + +# +# FIXME breaks on multiple strings. +# +def read_lilypond_header (fn): + s = open (fn).read () + s = re.sub ('%.*$', '', s) + s = re.sub ('\n', ' ', s) + + dict = {} + m = re.search (r"""\\header\s*{([^}]*)}""", s) + + if m: + s = m.group (1) + else: + return dict + + while s: + m = re.search (r'''\s*(\S+)\s*=\s*"([^"]+)"''', s) + if m == None: + s = '' + else: + s = s[m.end (0):] + left = m.group (1) + right = m.group (2) + + left = re.sub ('"', '', left) + right = re.sub ('"', '', right) + dict[left] = right + + return dict + +def help (): + sys.stdout.write (r'''Usage: mutopia-index [OPTIONS] INFILE OUTFILE +Generate index for mutopia. + +Options: + -h, --help print this help + -o, --output=FILE write output to file + -s, --subdirs=DIR add subdir + --suffix=SUF specify suffix + +''') + sys.exit (0) + +# ugh. +def gen_list (inputs, file_name): + sys.stderr.write ("generating HTML list %s" % file_name) + sys.stderr.write ('\n') + if file_name: + list = open (file_name, 'w') + else: + list = sys.stdout + list.write ('''Rendered Examples + + +''') + + list.write ('\n') + + if inputs: + list.write (headertext) + else: + list.write (headertext_nopics) + + for ex in inputs: + print ex + + (base, ext) = os.path.splitext (ex) + (base, ext2) = os.path.splitext (base) + ext = ext2 + ext + + header = read_lilypond_header (ex) + head = header.get ('title', os.path.basename (base)) + composer = header.get ('composer', '') + desc = header.get ('description', '') + list.write ('


      \n') + list.write ('

      %s

      \n' % head); + if composer: + list.write ('

      %s

      \n' % composer) + if desc: + list.write ('%s

      ' % desc) + list.write ('

        \n') + + def list_item (file_name, desc, type, lst = list): + if os.path.isfile (file_name): + lst.write ('
      • %s' + % (re.sub (junk_prefix, '', file_name), desc)) + + # FIXME: include warning if it uses \include + # files. + + size = os.stat (file_name)[stat.ST_SIZE] + kB = (size + 512) / 1024 + if kB: + lst.write (' (%s %d kB)' % (type, kB)) + else: + lst.write (' (%s %d characters)' + % (type, size)) + pictures = ['jpeg', 'png', 'xpm'] + lst.write ('\n') + else: + print "cannot find" , `file_name` + + list_item (base + ext, 'The input', 'ASCII') + + pages_found = 0 + for page in range (1, 100): + f = base + '-page%d.png' % page + + if not os.path.isfile (f): + break + pages_found += 1 + list_item (f, 'See a picture of page %d' % page, 'png') + + if pages_found == 0 and os.path.exists (base + '.png'): + list_item (base + ".png", + 'See a picture', 'png') + + + list_item (base + '.pdf', 'Print', 'PDF') + list_item (base + '.midi', 'Listen', 'MIDI') + list.write ('
      \n'); + + list.write ('\n'); + list.close () + +(options, files) = getopt.getopt (sys.argv[1:], + 'ho:', ['help', 'output=']) +outfile = 'examples.html' + +subdirs = [] +for (o, a) in options: + if o == '--help' or o == '-h': + help () + elif o == '--output' or o == '-o': + outfile = a + +dirs = [] +for f in files: + dirs += find ('out-www', f) + +if not dirs: + dirs = ['.'] + +allfiles = [] + +for d in dirs: + allfiles += find ('*.ly', d) + +allfiles = [f for f in allfiles + if not f.endswith ('snippet-map.ly') + and not re.search ('lily-[0-9a-f]+', f) + and 'musicxml' not in f] + +gen_list (allfiles, outfile) diff --git a/scripts/build/output-distance.py b/scripts/build/output-distance.py new file mode 100644 index 0000000000..afc4cf908f --- /dev/null +++ b/scripts/build/output-distance.py @@ -0,0 +1,1262 @@ +#!@PYTHON@ +import sys +import optparse +import os +import math + +## so we can call directly as scripts/build/output-distance.py +me_path = os.path.abspath (os.path.split (sys.argv[0])[0]) +sys.path.insert (0, me_path + '/../python/') +sys.path.insert (0, me_path + '/../python/out/') + + +X_AXIS = 0 +Y_AXIS = 1 +INFTY = 1e6 + +OUTPUT_EXPRESSION_PENALTY = 1 +ORPHAN_GROB_PENALTY = 1 +options = None + +################################################################ +# system interface. +temp_dir = None +class TempDirectory: + def __init__ (self): + import tempfile + self.dir = tempfile.mkdtemp () + print 'dir is', self.dir + def __del__ (self): + print 'rm -rf %s' % self.dir + os.system ('rm -rf %s' % self.dir) + def __call__ (self): + return self.dir + + +def get_temp_dir (): + global temp_dir + if not temp_dir: + temp_dir = TempDirectory () + return temp_dir () + +def read_pipe (c): + print 'pipe' , c + return os.popen (c).read () + +def system (c): + print 'system' , c + s = os.system (c) + if s : + raise Exception ("failed") + return + +def shorten_string (s): + threshold = 15 + if len (s) > 2*threshold: + s = s[:threshold] + '..' 
+ s[-threshold:] + return s + +def max_distance (x1, x2): + dist = 0.0 + + for (p,q) in zip (x1, x2): + dist = max (abs (p-q), dist) + + return dist + + +def compare_png_images (old, new, dest_dir): + def png_dims (f): + m = re.search ('([0-9]+) x ([0-9]+)', read_pipe ('file %s' % f)) + + return tuple (map (int, m.groups ())) + + dest = os.path.join (dest_dir, new.replace ('.png', '.compare.jpeg')) + try: + dims1 = png_dims (old) + dims2 = png_dims (new) + except AttributeError: + ## hmmm. what to do? + system ('touch %(dest)s' % locals ()) + return + + dims = (min (dims1[0], dims2[0]), + min (dims1[1], dims2[1])) + + dir = get_temp_dir () + system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop1.png' % (dims + (old, dir))) + system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop2.png' % (dims + (new, dir))) + + system ('compare -depth 8 %(dir)s/crop1.png %(dir)s/crop2.png %(dir)s/diff.png' % locals ()) + + system ("convert -depth 8 %(dir)s/diff.png -blur 0x3 -negate -channel alpha,blue -type TrueColorMatte -fx 'intensity' %(dir)s/matte.png" % locals ()) + + system ("composite -compose atop -quality 65 %(dir)s/matte.png %(new)s %(dest)s" % locals ()) + + +################################################################ +# interval/bbox arithmetic. 
+ +empty_interval = (INFTY, -INFTY) +empty_bbox = (empty_interval, empty_interval) + +def interval_is_empty (i): + return i[0] > i[1] + +def interval_length (i): + return max (i[1]-i[0], 0) + +def interval_union (i1, i2): + return (min (i1[0], i2[0]), + max (i1[1], i2[1])) + +def interval_intersect (i1, i2): + return (max (i1[0], i2[0]), + min (i1[1], i2[1])) + +def bbox_is_empty (b): + return (interval_is_empty (b[0]) + or interval_is_empty (b[1])) + +def bbox_union (b1, b2): + return (interval_union (b1[X_AXIS], b2[X_AXIS]), + interval_union (b2[Y_AXIS], b2[Y_AXIS])) + +def bbox_intersection (b1, b2): + return (interval_intersect (b1[X_AXIS], b2[X_AXIS]), + interval_intersect (b2[Y_AXIS], b2[Y_AXIS])) + +def bbox_area (b): + return interval_length (b[X_AXIS]) * interval_length (b[Y_AXIS]) + +def bbox_diameter (b): + return max (interval_length (b[X_AXIS]), + interval_length (b[Y_AXIS])) + + +def difference_area (a, b): + return bbox_area (a) - bbox_area (bbox_intersection (a,b)) + +class GrobSignature: + def __init__ (self, exp_list): + (self.name, self.origin, bbox_x, + bbox_y, self.output_expression) = tuple (exp_list) + + self.bbox = (bbox_x, bbox_y) + self.centroid = (bbox_x[0] + bbox_x[1], bbox_y[0] + bbox_y[1]) + + def __repr__ (self): + return '%s: (%.2f,%.2f), (%.2f,%.2f)\n' % (self.name, + self.bbox[0][0], + self.bbox[0][1], + self.bbox[1][0], + self.bbox[1][1]) + + def axis_centroid (self, axis): + return apply (sum, self.bbox[axis]) / 2 + + def centroid_distance (self, other, scale): + return max_distance (self.centroid, other.centroid) / scale + + def bbox_distance (self, other): + divisor = bbox_area (self.bbox) + bbox_area (other.bbox) + + if divisor: + return (difference_area (self.bbox, other.bbox) + + difference_area (other.bbox, self.bbox)) / divisor + else: + return 0.0 + + def expression_distance (self, other): + if self.output_expression == other.output_expression: + return 0 + else: + return 1 + 
+################################################################ +# single System. + +class SystemSignature: + def __init__ (self, grob_sigs): + d = {} + for g in grob_sigs: + val = d.setdefault (g.name, []) + val += [g] + + self.grob_dict = d + self.set_all_bbox (grob_sigs) + + def set_all_bbox (self, grobs): + self.bbox = empty_bbox + for g in grobs: + self.bbox = bbox_union (g.bbox, self.bbox) + + def closest (self, grob_name, centroid): + min_d = INFTY + min_g = None + try: + grobs = self.grob_dict[grob_name] + + for g in grobs: + d = max_distance (g.centroid, centroid) + if d < min_d: + min_d = d + min_g = g + + + return min_g + + except KeyError: + return None + def grobs (self): + return reduce (lambda x,y: x+y, self.grob_dict.values(), []) + +################################################################ +## comparison of systems. + +class SystemLink: + def __init__ (self, system1, system2): + self.system1 = system1 + self.system2 = system2 + + self.link_list_dict = {} + self.back_link_dict = {} + + + ## pairs + self.orphans = [] + + ## pair -> distance + self.geo_distances = {} + + ## pairs + self.expression_changed = [] + + self._geometric_distance = None + self._expression_change_count = None + self._orphan_count = None + + for g in system1.grobs (): + + ## skip empty bboxes. 
+ if bbox_is_empty (g.bbox): + continue + + closest = system2.closest (g.name, g.centroid) + + self.link_list_dict.setdefault (closest, []) + self.link_list_dict[closest].append (g) + self.back_link_dict[g] = closest + + + def calc_geometric_distance (self): + total = 0.0 + for (g1,g2) in self.back_link_dict.items (): + if g2: + d = g1.bbox_distance (g2) + if d: + self.geo_distances[(g1,g2)] = d + + total += d + + self._geometric_distance = total + + def calc_orphan_count (self): + count = 0 + for (g1, g2) in self.back_link_dict.items (): + if g2 == None: + self.orphans.append ((g1, None)) + + count += 1 + + self._orphan_count = count + + def calc_output_exp_distance (self): + d = 0 + for (g1,g2) in self.back_link_dict.items (): + if g2: + d += g1.expression_distance (g2) + + self._expression_change_count = d + + def output_expression_details_string (self): + return ', '.join ([g1.name for g1 in self.expression_changed]) + + def geo_details_string (self): + results = [(d, g1,g2) for ((g1, g2), d) in self.geo_distances.items()] + results.sort () + results.reverse () + + return ', '.join (['%s: %f' % (g1.name, d) for (d, g1, g2) in results]) + + def orphan_details_string (self): + return ', '.join (['%s-None' % g1.name for (g1,g2) in self.orphans if g2==None]) + + def geometric_distance (self): + if self._geometric_distance == None: + self.calc_geometric_distance () + return self._geometric_distance + + def orphan_count (self): + if self._orphan_count == None: + self.calc_orphan_count () + + return self._orphan_count + + def output_expression_change_count (self): + if self._expression_change_count == None: + self.calc_output_exp_distance () + return self._expression_change_count + + def distance (self): + return (self.output_expression_change_count (), + self.orphan_count (), + self.geometric_distance ()) + +def read_signature_file (name): + print 'reading', name + + entries = open (name).read ().split ('\n') + def string_to_tup (s): + return tuple (map (float, 
s.split (' '))) + + def string_to_entry (s): + fields = s.split('@') + fields[2] = string_to_tup (fields[2]) + fields[3] = string_to_tup (fields[3]) + + return tuple (fields) + + entries = [string_to_entry (e) for e in entries + if e and not e.startswith ('#')] + + grob_sigs = [GrobSignature (e) for e in entries] + sig = SystemSignature (grob_sigs) + return sig + + +################################################################ +# different systems of a .ly file. + +hash_to_original_name = {} + +class FileLink: + def __init__ (self, f1, f2): + self._distance = None + self.file_names = (f1, f2) + + def text_record_string (self): + return '%-30f %-20s\n' % (self.distance (), + self.name () + + os.path.splitext (self.file_names[1])[1] + ) + + def calc_distance (self): + return 0.0 + + def distance (self): + if self._distance == None: + self._distance = self.calc_distance () + + return self._distance + + def source_file (self): + for ext in ('.ly', '.ly.txt'): + base = os.path.splitext (self.file_names[1])[0] + f = base + ext + if os.path.exists (f): + return f + + return '' + + def name (self): + base = os.path.basename (self.file_names[1]) + base = os.path.splitext (base)[0] + base = hash_to_original_name.get (base, base) + base = os.path.splitext (base)[0] + return base + + def extension (self): + return os.path.splitext (self.file_names[1])[1] + + def link_files_for_html (self, dest_dir): + for f in self.file_names: + link_file (f, os.path.join (dest_dir, f)) + + def get_distance_details (self): + return '' + + def get_cell (self, oldnew): + return '' + + def get_file (self, oldnew): + return self.file_names[oldnew] + + def html_record_string (self, dest_dir): + dist = self.distance() + + details = self.get_distance_details () + if details: + details_base = os.path.splitext (self.file_names[1])[0] + details_base += '.details.html' + fn = dest_dir + '/' + details_base + open_write_file (fn).write (details) + + details = '
      (details)' % locals () + + cell1 = self.get_cell (0) + cell2 = self.get_cell (1) + + name = self.name () + self.extension () + file1 = self.get_file (0) + file2 = self.get_file (1) + + return ''' + +%(dist)f +%(details)s + +%(cell1)s
      %(name)s +%(cell2)s
      %(name)s +''' % locals () + + +class FileCompareLink (FileLink): + def __init__ (self, f1, f2): + FileLink.__init__ (self, f1, f2) + self.contents = (self.get_content (self.file_names[0]), + self.get_content (self.file_names[1])) + + + def calc_distance (self): + ## todo: could use import MIDI to pinpoint + ## what & where changed. + + if self.contents[0] == self.contents[1]: + return 0.0 + else: + return 100.0; + + def get_content (self, f): + print 'reading', f + s = open (f).read () + return s + + +class GitFileCompareLink (FileCompareLink): + def get_cell (self, oldnew): + str = self.contents[oldnew] + + # truncate long lines + str = '\n'.join ([l[:80] for l in str.split ('\n')]) + + + str = '
      %s
      ' % str + return str + + def calc_distance (self): + if self.contents[0] == self.contents[1]: + d = 0.0 + else: + d = 1.0001 *options.threshold + + return d + + +class TextFileCompareLink (FileCompareLink): + def calc_distance (self): + import difflib + diff = difflib.unified_diff (self.contents[0].strip().split ('\n'), + self.contents[1].strip().split ('\n'), + fromfiledate = self.file_names[0], + tofiledate = self.file_names[1] + ) + + self.diff_lines = [l for l in diff] + self.diff_lines = self.diff_lines[2:] + + return math.sqrt (float (len ([l for l in self.diff_lines if l[0] in '-+']))) + + def get_cell (self, oldnew): + str = '' + if oldnew == 1: + str = '\n'.join ([d.replace ('\n','') for d in self.diff_lines]) + str = '
      %s
      ' % str + return str + +class LogFileCompareLink (TextFileCompareLink): + def get_content (self, f): + c = TextFileCompareLink.get_content (self, f) + c = re.sub ("\nProcessing `[^\n]+'\n", '', c) + return c + +class ProfileFileLink (FileCompareLink): + def __init__ (self, f1, f2): + FileCompareLink.__init__ (self, f1, f2) + self.results = [{}, {}] + + def get_cell (self, oldnew): + str = '' + for k in ('time', 'cells'): + if oldnew==0: + str += '%-8s: %d\n' % (k, int (self.results[oldnew][k])) + else: + str += '%-8s: %8d (%5.3f)\n' % (k, int (self.results[oldnew][k]), + self.get_ratio (k)) + + return '
      %s
      ' % str + + def get_ratio (self, key): + (v1,v2) = (self.results[0].get (key, -1), + self.results[1].get (key, -1)) + + if v1 <= 0 or v2 <= 0: + return 0.0 + + return (v1 - v2) / float (v1+v2) + + def calc_distance (self): + for oldnew in (0,1): + def note_info (m): + self.results[oldnew][m.group(1)] = float (m.group (2)) + + re.sub ('([a-z]+): ([-0-9.]+)\n', + note_info, self.contents[oldnew]) + + dist = 0.0 + factor = { + 'time': 0.1, + 'cells': 5.0, + } + + for k in ('time', 'cells'): + real_val = math.tan (self.get_ratio (k) * 0.5 * math.pi) + dist += math.exp (math.fabs (real_val) * factor[k]) - 1 + + dist = min (dist, 100) + return dist + + +class MidiFileLink (TextFileCompareLink): + def get_content (self, oldnew): + import midi + + data = FileCompareLink.get_content (self, oldnew) + midi = midi.parse (data) + tracks = midi[1] + + str = '' + j = 0 + for t in tracks: + str += 'track %d' % j + j += 1 + + for e in t: + ev_str = repr (e) + if re.search ('LilyPond [0-9.]+', ev_str): + continue + + str += ' ev %s\n' % `e` + return str + + + +class SignatureFileLink (FileLink): + def __init__ (self, f1, f2 ): + FileLink.__init__ (self, f1, f2) + self.system_links = {} + + def add_system_link (self, link, number): + self.system_links[number] = link + + def calc_distance (self): + d = 0.0 + + orphan_distance = 0.0 + for l in self.system_links.values (): + d = max (d, l.geometric_distance ()) + orphan_distance += l.orphan_count () + + return d + orphan_distance + + def add_file_compare (self, f1, f2): + system_index = [] + + def note_system_index (m): + system_index.append (int (m.group (1))) + return '' + + base1 = re.sub ("-([0-9]+).signature", note_system_index, f1) + base2 = re.sub ("-([0-9]+).signature", note_system_index, f2) + + self.base_names = (os.path.normpath (base1), + os.path.normpath (base2)) + + s1 = read_signature_file (f1) + s2 = read_signature_file (f2) + + link = SystemLink (s1, s2) + + self.add_system_link (link, system_index[0]) + + + def 
create_images (self, dest_dir): + + files_created = [[], []] + for oldnew in (0, 1): + pat = self.base_names[oldnew] + '.eps' + + for f in glob.glob (pat): + infile = f + outfile = (dest_dir + '/' + f).replace ('.eps', '.png') + data_option = '' + if options.local_data_dir: + data_option = ('-slilypond-datadir=%s/../share/lilypond/current ' + % os.path.dirname(infile)) + + mkdir (os.path.split (outfile)[0]) + cmd = ('gs -sDEVICE=png16m -dGraphicsAlphaBits=4 -dTextAlphaBits=4 ' + ' %(data_option)s ' + ' -r101 ' + ' -sOutputFile=%(outfile)s -dNOSAFER -dEPSCrop -q -dNOPAUSE ' + ' %(infile)s -c quit ') % locals () + + files_created[oldnew].append (outfile) + system (cmd) + + return files_created + + def link_files_for_html (self, dest_dir): + FileLink.link_files_for_html (self, dest_dir) + to_compare = [[], []] + + exts = [] + if options.create_images: + to_compare = self.create_images (dest_dir) + else: + exts += ['.png', '-page*png'] + + for ext in exts: + for oldnew in (0,1): + for f in glob.glob (self.base_names[oldnew] + ext): + dst = dest_dir + '/' + f + link_file (f, dst) + + if f.endswith ('.png'): + to_compare[oldnew].append (f) + + if options.compare_images: + for (old, new) in zip (to_compare[0], to_compare[1]): + compare_png_images (old, new, dest_dir) + + + def get_cell (self, oldnew): + def img_cell (ly, img, name): + if not name: + name = 'source' + else: + name = '%s' % name + + return ''' +
      + +
      +''' % locals () + def multi_img_cell (ly, imgs, name): + if not name: + name = 'source' + else: + name = '%s' % name + + imgs_str = '\n'.join ([''' + +
      ''' % (img, img) + for img in imgs]) + + + return ''' +%(imgs_str)s +''' % locals () + + + + def cell (base, name): + pat = base + '-page*.png' + pages = glob.glob (pat) + + if pages: + return multi_img_cell (base + '.ly', sorted (pages), name) + else: + return img_cell (base + '.ly', base + '.png', name) + + + + str = cell (os.path.splitext (self.file_names[oldnew])[0], self.name ()) + if options.compare_images and oldnew == 1: + str = str.replace ('.png', '.compare.jpeg') + + return str + + + def get_distance_details (self): + systems = self.system_links.items () + systems.sort () + + html = "" + for (c, link) in systems: + e = '%d' % c + for d in link.distance (): + e += '%f' % d + + e = '%s' % e + + html += e + + e = '%d' % c + for s in (link.output_expression_details_string (), + link.orphan_details_string (), + link.geo_details_string ()): + e += "%s" % s + + + e = '%s' % e + html += e + + original = self.name () + html = ''' + +comparison details for %(original)s + + + + + + + + + + +%(html)s +
      systemoutputorphangeo
      + + + +''' % locals () + return html + + +################################################################ +# Files/directories + +import glob +import re + +def compare_signature_files (f1, f2): + s1 = read_signature_file (f1) + s2 = read_signature_file (f2) + + return SystemLink (s1, s2).distance () + +def paired_files (dir1, dir2, pattern): + """ + Search DIR1 and DIR2 for PATTERN. + + Return (PAIRED, MISSING-FROM-2, MISSING-FROM-1) + + """ + + files = [] + for d in (dir1,dir2): + found = [os.path.split (f)[1] for f in glob.glob (d + '/' + pattern)] + found = dict ((f, 1) for f in found) + files.append (found) + + pairs = [] + missing = [] + for f in files[0]: + try: + files[1].pop (f) + pairs.append (f) + except KeyError: + missing.append (f) + + return (pairs, files[1].keys (), missing) + +class ComparisonData: + def __init__ (self): + self.result_dict = {} + self.missing = [] + self.added = [] + self.file_links = {} + + def read_sources (self): + + ## ugh: drop the .ly.txt + for (key, val) in self.file_links.items (): + + def note_original (match, ln=val): + key = ln.name () + hash_to_original_name[key] = match.group (1) + return '' + + sf = val.source_file () + if sf: + re.sub (r'\\sourcefilename "([^"]+)"', + note_original, open (sf).read ()) + else: + print 'no source for', val + + def compare_trees (self, dir1, dir2): + self.compare_directories (dir1, dir2) + + (root, dirs, files) = os.walk (dir1).next () + for d in dirs: + d1 = os.path.join (dir1, d) + d2 = os.path.join (dir2, d) + + if os.path.islink (d1) or os.path.islink (d2): + continue + + if os.path.isdir (d2): + self.compare_trees (d1, d2) + + def compare_directories (self, dir1, dir2): + for ext in ['signature', + 'midi', + 'log', + 'profile', + 'gittxt']: + (paired, m1, m2) = paired_files (dir1, dir2, '*.' 
+ ext) + + self.missing += [(dir1, m) for m in m1] + self.added += [(dir2, m) for m in m2] + + for p in paired: + if (options.max_count + and len (self.file_links) > options.max_count): + continue + + f2 = dir2 + '/' + p + f1 = dir1 + '/' + p + self.compare_files (f1, f2) + + def compare_files (self, f1, f2): + if f1.endswith ('signature'): + self.compare_signature_files (f1, f2) + else: + ext = os.path.splitext (f1)[1] + klasses = { + '.midi': MidiFileLink, + '.log' : LogFileCompareLink, + '.profile': ProfileFileLink, + '.gittxt': GitFileCompareLink, + } + + if klasses.has_key (ext): + self.compare_general_files (klasses[ext], f1, f2) + + def compare_general_files (self, klass, f1, f2): + name = os.path.split (f1)[1] + + file_link = klass (f1, f2) + self.file_links[name] = file_link + + def compare_signature_files (self, f1, f2): + name = os.path.split (f1)[1] + name = re.sub ('-[0-9]+.signature', '', name) + + file_link = None + try: + file_link = self.file_links[name] + except KeyError: + generic_f1 = re.sub ('-[0-9]+.signature', '.ly', f1) + generic_f2 = re.sub ('-[0-9]+.signature', '.ly', f2) + file_link = SignatureFileLink (generic_f1, generic_f2) + self.file_links[name] = file_link + + file_link.add_file_compare (f1, f2) + + def write_changed (self, dest_dir, threshold): + (changed, below, unchanged) = self.thresholded_results (threshold) + + str = '\n'.join ([os.path.splitext (link.file_names[1])[0] + for link in changed]) + fn = dest_dir + '/changed.txt' + + open_write_file (fn).write (str) + + def thresholded_results (self, threshold): + ## todo: support more scores. 
+ results = [(link.distance(), link) + for link in self.file_links.values ()] + results.sort () + results.reverse () + + unchanged = [r for (d,r) in results if d == 0.0] + below = [r for (d,r) in results if threshold >= d > 0.0] + changed = [r for (d,r) in results if d > threshold] + + return (changed, below, unchanged) + + def write_text_result_page (self, filename, threshold): + out = None + if filename == '': + out = sys.stdout + else: + print 'writing "%s"' % filename + out = open_write_file (filename) + + (changed, below, unchanged) = self.thresholded_results (threshold) + + + for link in changed: + out.write (link.text_record_string ()) + + out.write ('\n\n') + out.write ('%d below threshold\n' % len (below)) + out.write ('%d unchanged\n' % len (unchanged)) + + def create_text_result_page (self, dir1, dir2, dest_dir, threshold): + self.write_text_result_page (dest_dir + '/index.txt', threshold) + + def create_html_result_page (self, dir1, dir2, dest_dir, threshold): + dir1 = dir1.replace ('//', '/') + dir2 = dir2.replace ('//', '/') + + (changed, below, unchanged) = self.thresholded_results (threshold) + + + html = '' + old_prefix = os.path.split (dir1)[1] + for link in changed: + html += link.html_record_string (dest_dir) + + + short_dir1 = shorten_string (dir1) + short_dir2 = shorten_string (dir2) + html = ''' + + + + + + +%(html)s +
      distance%(short_dir1)s%(short_dir2)s
      +''' % locals() + + html += ('

      ') + below_count = len (below) + + if below_count: + html += ('

      %d below threshold

      ' % below_count) + + html += ('

      %d unchanged

      ' % len (unchanged)) + + dest_file = dest_dir + '/index.html' + open_write_file (dest_file).write (html) + + + for link in changed: + link.link_files_for_html (dest_dir) + + + def print_results (self, threshold): + self.write_text_result_page ('', threshold) + +def compare_trees (dir1, dir2, dest_dir, threshold): + data = ComparisonData () + data.compare_trees (dir1, dir2) + data.read_sources () + + + data.print_results (threshold) + + if os.path.isdir (dest_dir): + system ('rm -rf %s '% dest_dir) + + data.write_changed (dest_dir, threshold) + data.create_html_result_page (dir1, dir2, dest_dir, threshold) + data.create_text_result_page (dir1, dir2, dest_dir, threshold) + +################################################################ +# TESTING + +def mkdir (x): + if not os.path.isdir (x): + print 'mkdir', x + os.makedirs (x) + +def link_file (x, y): + mkdir (os.path.split (y)[0]) + try: + print x, '->', y + os.link (x, y) + except OSError, z: + print 'OSError', x, y, z + raise OSError + +def open_write_file (x): + d = os.path.split (x)[0] + mkdir (d) + return open (x, 'w') + + +def system (x): + + print 'invoking', x + stat = os.system (x) + assert stat == 0 + + +def test_paired_files (): + print paired_files (os.environ["HOME"] + "/src/lilypond/scripts/", + os.environ["HOME"] + "/src/lilypond-stable/scripts/build/", '*.py') + + +def test_compare_trees (): + system ('rm -rf dir1 dir2') + system ('mkdir dir1 dir2') + system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir1') + system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir2') + system ('cp 20expr{-*.signature,.ly,.png,.eps,.log,.profile} dir1') + system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir2/') + system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir1/') + system ('cp 19-1.signature 19.sub-1.signature') + system ('cp 19.ly 19.sub.ly') + system ('cp 19.profile 19.sub.profile') + system ('cp 19.log 19.sub.log') + system ('cp 19.png 19.sub.png') + system ('cp 19.eps 
19.sub.eps') + + system ('cp 20multipage* dir1') + system ('cp 20multipage* dir2') + system ('cp 19multipage-1.signature dir2/20multipage-1.signature') + + + system ('mkdir -p dir1/subdir/ dir2/subdir/') + system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir1/subdir/') + system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir2/subdir/') + system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir2/') + system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir1/') + system ('echo HEAD is 1 > dir1/tree.gittxt') + system ('echo HEAD is 2 > dir2/tree.gittxt') + + ## introduce differences + system ('cp 19-1.signature dir2/20-1.signature') + system ('cp 19.profile dir2/20.profile') + system ('cp 19.png dir2/20.png') + system ('cp 19multipage-page1.png dir2/20multipage-page1.png') + system ('cp 20-1.signature dir2/subdir/19.sub-1.signature') + system ('cp 20.png dir2/subdir/19.sub.png') + system ("sed 's/: /: 1/g' 20.profile > dir2/subdir/19.sub.profile") + + ## radical diffs. 
+ system ('cp 19-1.signature dir2/20grob-1.signature') + system ('cp 19-1.signature dir2/20grob-2.signature') + system ('cp 19multipage.midi dir1/midi-differ.midi') + system ('cp 20multipage.midi dir2/midi-differ.midi') + system ('cp 19multipage.log dir1/log-differ.log') + system ('cp 19multipage.log dir2/log-differ.log && echo different >> dir2/log-differ.log && echo different >> dir2/log-differ.log') + + compare_trees ('dir1', 'dir2', 'compare-dir1dir2', options.threshold) + + +def test_basic_compare (): + ly_template = r""" + +\version "2.10.0" +#(define default-toplevel-book-handler + print-book-with-defaults-as-systems ) + +#(ly:set-option (quote no-point-and-click)) + +\sourcefilename "my-source.ly" + +%(papermod)s +\header { tagline = ##f } +\score { +<< +\new Staff \relative c { + c4^"%(userstring)s" %(extragrob)s + } +\new Staff \relative c { + c4^"%(userstring)s" %(extragrob)s + } +>> +\layout{} +} + +""" + + dicts = [{ 'papermod' : '', + 'name' : '20', + 'extragrob': '', + 'userstring': 'test' }, + { 'papermod' : '#(set-global-staff-size 19.5)', + 'name' : '19', + 'extragrob': '', + 'userstring': 'test' }, + { 'papermod' : '', + 'name' : '20expr', + 'extragrob': '', + 'userstring': 'blabla' }, + { 'papermod' : '', + 'name' : '20grob', + 'extragrob': 'r2. 
\\break c1', + 'userstring': 'test' }, + ] + + for d in dicts: + open (d['name'] + '.ly','w').write (ly_template % d) + + names = [d['name'] for d in dicts] + + system ('lilypond -ddump-profile -dseparate-log-files -ddump-signatures --png -dbackend=eps ' + ' '.join (names)) + + + multipage_str = r''' + #(set-default-paper-size "a6") + \score { + \relative {c1 \pageBreak c1 } + \layout {} + \midi {} + } + ''' + + open ('20multipage.ly', 'w').write (multipage_str.replace ('c1', 'd1')) + open ('19multipage.ly', 'w').write ('#(set-global-staff-size 19.5)\n' + multipage_str) + system ('lilypond -dseparate-log-files -ddump-signatures --png 19multipage 20multipage ') + + test_compare_signatures (names) + +def test_compare_signatures (names, timing=False): + import time + + times = 1 + if timing: + times = 100 + + t0 = time.clock () + + count = 0 + for t in range (0, times): + sigs = dict ((n, read_signature_file ('%s-1.signature' % n)) for n in names) + count += 1 + + if timing: + print 'elapsed', (time.clock() - t0)/count + + + t0 = time.clock () + count = 0 + combinations = {} + for (n1, s1) in sigs.items(): + for (n2, s2) in sigs.items(): + combinations['%s-%s' % (n1, n2)] = SystemLink (s1,s2).distance () + count += 1 + + if timing: + print 'elapsed', (time.clock() - t0)/count + + results = combinations.items () + results.sort () + for k,v in results: + print '%-20s' % k, v + + assert combinations['20-20'] == (0.0,0.0,0.0) + assert combinations['20-20expr'][0] > 0.0 + assert combinations['20-19'][2] < 10.0 + assert combinations['20-19'][2] > 0.0 + + +def run_tests (): + dir = 'test-output-distance' + + do_clean = not os.path.exists (dir) + + print 'test results in ', dir + if do_clean: + system ('rm -rf ' + dir) + system ('mkdir ' + dir) + + os.chdir (dir) + if do_clean: + test_basic_compare () + + test_compare_trees () + +################################################################ +# + +def main (): + p = optparse.OptionParser ("output-distance - compare LilyPond 
formatting runs") + p.usage = 'output-distance.py [options] tree1 tree2' + + p.add_option ('', '--test-self', + dest="run_test", + action="store_true", + help='run test method') + + p.add_option ('--max-count', + dest="max_count", + metavar="COUNT", + type="int", + default=0, + action="store", + help='only analyze COUNT signature pairs') + + p.add_option ('', '--threshold', + dest="threshold", + default=0.3, + action="store", + type="float", + help='threshold for geometric distance') + + p.add_option ('--no-compare-images', + dest="compare_images", + default=True, + action="store_false", + help="Don't run graphical comparisons") + + p.add_option ('--create-images', + dest="create_images", + default=False, + action="store_true", + help="Create PNGs from EPSes") + + + p.add_option ('--local-datadir', + dest="local_data_dir", + default=False, + action="store_true", + help='whether to use the share/lilypond/ directory in the test directory') + + p.add_option ('-o', '--output-dir', + dest="output_dir", + default=None, + action="store", + type="string", + help='where to put the test results [tree2/compare-tree1tree2]') + + global options + (options, args) = p.parse_args () + + if options.run_test: + run_tests () + sys.exit (0) + + if len (args) != 2: + p.print_usage() + sys.exit (2) + + name = options.output_dir + if not name: + name = args[0].replace ('/', '') + name = os.path.join (args[1], 'compare-' + shorten_string (name)) + + compare_trees (args[0], args[1], name, options.threshold) + +if __name__ == '__main__': + main() + diff --git a/scripts/build/pytt.py b/scripts/build/pytt.py new file mode 100644 index 0000000000..09f5c7b1f6 --- /dev/null +++ b/scripts/build/pytt.py @@ -0,0 +1,24 @@ +#!@PYTHON@ + +import os +import re +import sys + +frm = re.compile (sys.argv[1], re.MULTILINE) +to = sys.argv[2] + +if not sys.argv[3:] or sys.argv[3] == '-': + sys.stdout.write (re.sub (frm, to, sys.stdin.read ())) +for file in sys.argv[3:]: + s = open (file).read () + name = 
os.path.basename (file) + base, ext = os.path.splitext (name) + t = re.sub (frm, to % locals (), s) + if s != t: + if 1: + os.system ('mv %(file)s %(file)s~~' % locals ()) + h = open (file, "w") + h.write (t) + h.close () + else: + sys.stdout.write (t) diff --git a/scripts/build/texi-gettext.py b/scripts/build/texi-gettext.py new file mode 100644 index 0000000000..546819b155 --- /dev/null +++ b/scripts/build/texi-gettext.py @@ -0,0 +1,77 @@ +#!@PYTHON@ +# -*- coding: utf-8 -*- +# texi-gettext.py + +# USAGE: texi-gettext.py [-o OUTDIR] LANG FILES +# +# -o OUTDIR specifies that output files should rather be written in OUTDIR +# + +print "texi_gettext.py" + +import sys +import re +import os +import getopt + +import langdefs + +optlist, args = getopt.getopt (sys.argv[1:],'o:') +lang = args[0] +files = args[1:] + +outdir = '.' +for x in optlist: + if x[0] == '-o': + outdir = x[1] + +double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep +_doc = langdefs.translation[lang] + +include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M) +whitespaces = re.compile (r'\s+') +ref_re = re.compile (r'(?ms)@(rglos|ruser|rprogram|ref)(\{)(.*?)(\})') +node_section_re = re.compile (r'@(node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading)( )(.*?)(\n)') +menu_entry_re = re.compile (r'\* (.*?)::') + +def title_gettext (m): + if m.group (2) == '{': + r = whitespaces.sub (' ', m.group (3)) + else: + r = m.group (3) + return '@' + m.group (1) + m.group (2) + _doc (r) + m.group (4) + +def menu_entry_gettext (m): + return '* ' + _doc (m.group (1)) + '::' + +def include_replace (m, filename): + if os.path.exists (os.path.join (os.path.dirname (filename), m.group(1)) + '.texi'): + return '@include ' + m.group(1) + '.pdftexi' + return m.group(0) + +def process_file (filename): + print "Processing %s" % filename + f = open (filename, 'r') + page = f.read () + f.close() + page = node_section_re.sub 
(title_gettext, page) + page = ref_re.sub (title_gettext, page) + page = menu_entry_re.sub (menu_entry_gettext, page) + page = page.replace ("""-- SKELETON FILE -- +When you actually translate this file, please remove these lines as +well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", '') + page = page.replace ('UNTRANSLATED NODE: IGNORE ME', _doc ("This section has not been translated yet; please refer to the manual in English.")) + includes = include_re.findall (page) + page = include_re.sub (lambda m: include_replace (m, filename), page) + p = os.path.join (outdir, filename) [:-4] + 'pdftexi' + f = open (p, 'w') + f.write (page) + f.close () + dir = os.path.dirname (filename) + for file in includes: + p = os.path.join (dir, file) + '.texi' + if os.path.exists (p): + process_file (p) + +for filename in files: + process_file (filename) diff --git a/scripts/build/texi2omf.py b/scripts/build/texi2omf.py new file mode 100644 index 0000000000..cc2603f5e0 --- /dev/null +++ b/scripts/build/texi2omf.py @@ -0,0 +1,154 @@ +#!@PYTHON@ + +import getopt +import os +import re +import sys +import time + +def usage (): + sys.stderr.write (''' +texi2omf [options] FILE.texi > FILE.omf + +Options: + +--format=FORM set format FORM (HTML, PS, PDF, [XML]). +--location=FILE file name as installed on disk. +--version=VERSION + +Use the following commands (enclose in @ignore) + +@omfsubject . . +@omfdescription . . +@omftype . . + +etc. 
+ + +''') + +(options, files) = getopt.getopt (sys.argv[1:], '', + ['format=', 'location=', 'version=']) + +license = 'FDL' +location = '' +version = '' +email = os.getenv ('MAILADDRESS') +name = os.getenv ('USERNAME') +format = 'xml' + +for (o, a) in options: + if o == '--format': + format = a + elif o == '--location': + location = 'file:%s' % a + elif o == '--version': + version = a + else: + assert 0 + + +if not files: + usage () + sys.exit (2) + + +formats = { + 'html' : 'text/html', + 'pdf' : 'application/pdf', + 'ps.gz' : 'application/postscript', + 'ps' : 'application/postscript', + 'xml' : 'text/xml', + } + +if not formats.has_key (format): + sys.stderr.write ("Format `%s' unknown\n" % format) + sys.exit (1) + + +infile = files[0] + +today = time.localtime () + +texi = open (infile).read () + +if not location: + location = 'file:/%s' % re.sub (r'\..*', '.' + format, infile) + +omf_vars = { + 'date': '%d-%d-%d' % today[:3], + 'mimeformat': formats[format], + 'maintainer': "%s (%s)" % (name, email), + 'version' : version, + 'location' : location, + 'language' : 'C', + } + +omf_caterories = ['subject', 'creator', 'maintainer', 'contributor', + 'title', 'subtitle', 'version', 'category', 'type', + 'description', 'license', 'language',] + +for a in omf_caterories: + m = re.search ('@omf%s (.*)\n'% a, texi) + if m: + omf_vars[a] = m.group (1) + elif not omf_vars.has_key (a): + omf_vars[a] = '' + +if not omf_vars['title']: + title = '' + m = re.search ('@title (.*)\n', texi) + if m: + title = m.group (1) + + subtitle = '' + m = re.search ('@subtitle (.*)\n', texi) + if m: + subtitle = m.group (1) + + if subtitle: + title = '%s -- %s' % (title, subtitle) + + omf_vars['title'] = title + +if not omf_vars['creator']: + m = re.search ('@author (.*)\n', texi) + if m: + omf_vars['creator'] = m.group (1) + + + +print r''' + + + + + %(creator)s + + + %(maintainer)s + + + %(title)s + + + %(date)s + + + + + %(description)s + + + %(type)s + + + + + + + + +''' % omf_vars + + 
diff --git a/scripts/build/www_post.py b/scripts/build/www_post.py new file mode 100644 index 0000000000..29f80cf09f --- /dev/null +++ b/scripts/build/www_post.py @@ -0,0 +1,100 @@ +#!@PYTHON@ + +## This is www_post.py. This script is the main stage +## of toplevel GNUmakefile local-WWW-post target. + +# USAGE: www_post PACKAGE_NAME TOPLEVEL_VERSION OUTDIR TARGETS +# please call me from top of the source directory + +import sys +import os +import re + +import langdefs + +import mirrortree +import postprocess_html + +package_name, package_version, outdir, targets = sys.argv[1:] +targets = targets.split (' ') +outdir = os.path.normpath (outdir) +doc_dirs = ['input', 'Documentation', outdir] +target_pattern = os.path.join (outdir, '%s-root') + +# these redirection pages allow to go back to the documentation index +# from HTML manuals/snippets page +static_files = { + os.path.join (outdir, 'index.html'): + ''' +Redirecting to the documentation index...\n''', + os.path.join (outdir, 'VERSION'): + package_version + '\n', + os.path.join ('input', 'lsr', outdir, 'index.html'): + ''' +Redirecting to the documentation index...\n''' + } + +for l in langdefs.LANGUAGES: + static_files[os.path.join ('Documentation', 'user', outdir, l.file_name ('index', '.html'))] = \ + '\nRedirecting to the documentation index...\n' + +for f, contents in static_files.items (): + open (f, 'w').write (contents) + +sys.stderr.write ("Mirrorring...\n") +dirs, symlinks, files = mirrortree.walk_tree ( + tree_roots = doc_dirs, + process_dirs = outdir, + exclude_dirs = '(^|/)(' + r'|po|out|out-test|.*?[.]t2d|\w*?-root)(/|$)|Documentation/(' + '|'.join ([l.code for l in langdefs.LANGUAGES]) + ')', + find_files = r'.*?\.(?:midi|html|pdf|png|txt|i?ly|signature|css)$|VERSION', + exclude_files = r'lily-[0-9a-f]+.*\.(pdf|txt)') + +# actual mirrorring stuff +html_files = [] +hardlinked_files = [] +for f in files: + if f.endswith ('.html'): + html_files.append (f) + else: + hardlinked_files.append (f) +dirs = 
[re.sub ('/' + outdir, '', d) for d in dirs] +while outdir in dirs: + dirs.remove (outdir) +dirs = list (set (dirs)) +dirs.sort () + +strip_file_name = {} +strip_re = re.compile (outdir + '/') +for t in targets: + out_root = target_pattern % t + strip_file_name[t] = lambda s: os.path.join (target_pattern % t, (strip_re.sub ('', s))) + os.mkdir (out_root) + map (os.mkdir, [os.path.join (out_root, d) for d in dirs]) + for f in hardlinked_files: + os.link (f, strip_file_name[t] (f)) + for l in symlinks: + p = mirrortree.new_link_path (os.path.normpath (os.readlink (l)), os.path.dirname (l), strip_re) + dest = strip_file_name[t] (l) + if not os.path.exists (dest): + os.symlink (p, dest) + + ## ad-hoc renaming to make xrefs between PDFs work + os.rename (os.path.join (out_root, 'input/lsr/lilypond-snippets.pdf'), + os.path.join (out_root, 'Documentation/user/lilypond-snippets.pdf')) + +# need this for content negotiation with documentation index +if 'online' in targets: + f = open (os.path.join (target_pattern % 'online', 'Documentation/.htaccess'), 'w') + f.write ('#.htaccess\nDirectoryIndex index\n') + f.close () + +postprocess_html.build_pages_dict (html_files) +for t in targets: + sys.stderr.write ("Processing HTML pages for %s target...\n" % t) + postprocess_html.process_html_files ( + package_name = package_name, + package_version = package_version, + target = t, + name_filter = strip_file_name[t]) + diff --git a/stepmake/stepmake/help2man-rules.make b/stepmake/stepmake/help2man-rules.make index 51073c20b1..e0498b0148 100644 --- a/stepmake/stepmake/help2man-rules.make +++ b/stepmake/stepmake/help2man-rules.make @@ -1,5 +1,5 @@ # We must invoke the generated $(outdir)/help2man script instead of -# the help2man.pl source, which means that the buildscripts directory +# the help2man.pl source, which means that the scripts/build directory # must be built first. # # From the perlrun man-page: @@ -15,10 +15,10 @@ # cases. 
Four more explaining what a line comment is, and that it may # be parsed, same here. -HELP2MAN_COMMAND = $(PERL) $(top-build-dir)/buildscripts/$(outbase)/help2man $< > $@ +HELP2MAN_COMMAND = $(buildscript-dir)/help2man $< > $@ ifeq ($(strip $(CROSS)),no) -$(outdir)/%.1: $(outdir)/% +$(outdir)/%.1: $(outdir)/% $(buildscript-dir)/help2man $(HELP2MAN_COMMAND) else # When cross building, some manpages will not build because the @@ -33,3 +33,6 @@ ifneq ($(outdir),./out) $(outdir)/%.1: out/%.1 cp $< $@ endif + +$(buildscript-dir)/help2man: + $(MAKE) -C $(depth)/scripts/build diff --git a/stepmake/stepmake/metafont-rules.make b/stepmake/stepmake/metafont-rules.make index 592a47b351..ab28ced9a5 100644 --- a/stepmake/stepmake/metafont-rules.make +++ b/stepmake/stepmake/metafont-rules.make @@ -19,7 +19,7 @@ $(outdir)/%.pfb: %.mf $(outdir)/mf2pt1.mem TMP=`mktemp -d $(outdir)/pfbtemp.XXXXXXXXX` \ && ( cd $$TMP \ && ln -s ../mf2pt1.mem . \ - && MFINPUTS=$(top-src-dir)/mf:..:: $(PERL) $(top-src-dir)/buildscripts/mf2pt1.pl $(MF2PT1_OPTIONS) $< $(METAFONT_QUIET)) \ + && MFINPUTS=$(top-src-dir)/mf:..:: $(buildscript-dir)/mf2pt1 $(MF2PT1_OPTIONS) $< $(METAFONT_QUIET)) \ && mv $$TMP/*pfb $(outdir); \ rm -rf $$TMP diff --git a/stepmake/stepmake/metapost-rules.make b/stepmake/stepmake/metapost-rules.make deleted file mode 100644 index 3bf28cab83..0000000000 --- a/stepmake/stepmake/metapost-rules.make +++ /dev/null @@ -1,14 +0,0 @@ -# Don't remove $(outdir)/.log's. Logs are a target! 
- -$(outdir)/%.0: %.mf $(outdir)/mfplain.mem - -$(METAPOST) "&$(outdir)/mfplain \mode=lowres; \mag=1.0; nonstopmode; input $<" - -$(outdir)/mfplain.mem: $(MFPLAIN_MP) - $(INIMETAPOST) $(INIMETAPOST_FLAGS) $(MFPLAIN_MP) dump - mv mfplain.* $(outdir) - -$(outdir)/%.pfa: $(outdir)/%.0 - $(PYTHON) $(depth)/buildscripts/ps-to-pfa.py --output $(basename $<).pfa $< - rm -f $(basename $(@F)).[0-9]* - rm -f $(basename $(@F)).tfm $(basename $(@F)).log - diff --git a/stepmake/stepmake/metapost-targets.make b/stepmake/stepmake/metapost-targets.make deleted file mode 100644 index d976642f23..0000000000 --- a/stepmake/stepmake/metapost-targets.make +++ /dev/null @@ -1,2 +0,0 @@ - -pfa: $(PFA_FILES) diff --git a/stepmake/stepmake/metapost-vars.make b/stepmake/stepmake/metapost-vars.make deleted file mode 100644 index 1dae6a2171..0000000000 --- a/stepmake/stepmake/metapost-vars.make +++ /dev/null @@ -1,4 +0,0 @@ - -MP_PFA_FILES = $(addprefix $(outdir)/, $(FONT_FILES:.mf=.pfa)) -PFA_FILES += $(MP_PFA_FILES) - diff --git a/stepmake/stepmake/texinfo-rules.make b/stepmake/stepmake/texinfo-rules.make index ae2c4acc8f..27924f71e1 100644 --- a/stepmake/stepmake/texinfo-rules.make +++ b/stepmake/stepmake/texinfo-rules.make @@ -6,12 +6,12 @@ # $(outdir)/$(INFO_IMAGES_DIR)/*.png symlinks are only needed to view # out-www/*.info with Emacs -- HTML docs no longer need these # symlinks, see replace_symlinks_urls in -# buildscripts/add_html_footer.py. +# python/aux/postprocess_html.py. 
# make dereferences symlinks, and $(INFO_IMAGES_DIR) is a symlink # to $(outdir), so we can't use directly $(INFO_IMAGES_DIR) as a # prerequisite, otherwise %.info are always outdated (because older -# than $(outdir), hence this .dep file +# than $(outdir)), hence this .dep file $(outdir)/$(INFO_IMAGES_DIR).info-images-dir-dep: $(INFO_DOCS:%=$(outdir)/%.texi) ifneq ($(INFO_IMAGES_DIR),) @@ -19,7 +19,7 @@ ifneq ($(INFO_IMAGES_DIR),) ln -s $(outdir) $(INFO_IMAGES_DIR) mkdir -p $(outdir)/$(INFO_IMAGES_DIR) rm -f $(outdir)/$(INFO_IMAGES_DIR)/[a-f0-9][a-f0-9] - cd $(outdir)/$(INFO_IMAGES_DIR) && $(PYTHON) $(top-src-dir)/buildscripts/mass-link.py symbolic .. . [a-f0-9][a-f0-9] + cd $(outdir)/$(INFO_IMAGES_DIR) && $(buildscript-dir)/mass-link symbolic .. . [a-f0-9][a-f0-9] endif touch $@ @@ -72,7 +72,7 @@ $(outdir)/%.texi: %.texi cp $< $@ $(XREF_MAPS_DIR)/%.xref-map: $(outdir)/%.texi - $(PYTHON) $(buildscript-dir)/extract_texi_filenames.py -o $(XREF_MAPS_DIR) $< + $(buildscript-dir)/extract_texi_filenames -o $(XREF_MAPS_DIR) $< $(outdir)/version.%: $(top-src-dir)/VERSION diff --git a/stepmake/stepmake/texinfo-vars.make b/stepmake/stepmake/texinfo-vars.make index 0fbf0f5919..ca567b6868 100644 --- a/stepmake/stepmake/texinfo-vars.make +++ b/stepmake/stepmake/texinfo-vars.make @@ -6,7 +6,7 @@ TEXINFO_SOURCES = $(TEXI_FILES) OUTTXT_FILES += $(addprefix $(outdir)/,$(TEXI_FILES:.texi=.txt)) -GENERATE_OMF = $(PYTHON) $(buildscript-dir)/texi2omf.py --format $(1) --location $(webdir)/$(tree-dir)/out-www/$(notdir $(basename $@)) --version $(TOPLEVEL_VERSION) $< > $@ +GENERATE_OMF = $(buildscript-dir)/texi2omf --format $(1) --location $(webdir)/$(tree-dir)/out-www/$(notdir $(basename $@)) --version $(TOPLEVEL_VERSION) $< > $@ TEXINFO_PAPERSIZE_OPTION= $(if $(findstring $(PAPERSIZE),a4),,-t @afourpaper) diff --git a/tex/SConscript b/tex/SConscript deleted file mode 100644 index e9feaa3131..0000000000 --- a/tex/SConscript +++ /dev/null @@ -1,5 +0,0 @@ -# -*-python-*- - -Import ('env', 
'install', 'src_glob') -sources = src_glob ('*.tex') -install (sources, env['sharedir_package_version'] + '/tex') diff --git a/vim/GNUmakefile b/vim/GNUmakefile index fd82e40164..ad032e42fe 100644 --- a/vim/GNUmakefile +++ b/vim/GNUmakefile @@ -9,7 +9,7 @@ EXTRA_DIST_FILES=$(call src-wildcard,*.vim) vimrc LILYPOND_WORDS = $(outdir)/lilypond-words $(outdir)/lilypond-words.vim LILYPOND_WORDS_DEPENDS =\ $(top-src-dir)/lily/lily-lexer.cc \ - $(buildscript-dir)/lilypond-words.py \ + $(buildscript-dir)/lilypond-words \ $(top-src-dir)/scm/markup.scm \ $(top-src-dir)/ly/engraver-init.ly @@ -31,8 +31,11 @@ local-uninstall: done -rmdir -p $(DESTDIR)$(vimdir) +$(buildscript-dir)/lilypond-words: + make -C $(depth)/scripts/build + $(LILYPOND_WORDS): - cd $(top-src-dir) && $(PYTHON) buildscripts/lilypond-words.py --words --vim --dir=$(top-build-dir)/vim/$(outconfbase) + cd $(top-src-dir) && $(buildscript-dir)/lilypond-words --words --vim --dir=$(top-build-dir)/vim/$(outconfbase) all: $(LILYPOND_WORDS) diff --git a/vim/SConscript b/vim/SConscript deleted file mode 100644 index 6896a7b6f6..0000000000 --- a/vim/SConscript +++ /dev/null @@ -1,15 +0,0 @@ -# -*-python-*- - -Import ('env', 'install', 'src_glob') -sources = src_glob ('*.vim') + ['lilypond-words.vim'] - -e = env.Copy () -a = '$PYTHON $srcdir/buildscripts/lilypond-words.py --words --vim --dir=${TARGET.dir}' -e.Command ('lilypond-words.vim', - ['#/lily/lily-lexer.cc', - '#/buildscripts/lilypond-words.py', - '#/scm/markup.scm', - '#/ly/engraver-init.ly',], - a) - -install (sources, env['sharedir_package_version'] + '/vim')