$(outdir)/%.css: %.css
ln -f $< $@
+
+### Translations maintenance targets
+
po-update:
make -C po po-update
cp fr/GNUmakefile $(ISOLANG)
cp fr/user/GNUmakefile $(ISOLANG)/user
sed -i -e 's/ISOLANG *= *fr/ISOLANG = $(ISOLANG)/' $(ISOLANG)/GNUmakefile $(ISOLANG)/user/GNUmakefile
- $(PYTHON) $(buildscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) -o doc.pot --skeleton --gettext ../user/lilypond-learning.tely
+ $(auxscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) -o doc.pot --skeleton --gettext ../user/lilypond-learning.tely
mv $(outdir)/*.*tely $(ISOLANG)/user
msgmerge -U po/lilypond-doc.pot $(outdir)/doc.pot
cp po/lilypond-doc.pot po/$(ISOLANG).po
TELY_FILES = $(call src-wildcard,$(ISOLANG)/user/*.tely)
skeleton-update:
- $(PYTHON) $(buildscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) --skeleton $(TELY_FILES:$(ISOLANG)/user/%.tely=../user/%.tely)
- $(PYTHON) $(buildscript-dir)/texi-skeleton-update.py $(ISOLANG)/user $(outdir)
+ $(auxscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) --skeleton $(TELY_FILES:$(ISOLANG)/user/%.tely=../user/%.tely)
+ $(auxscript-dir)/texi-skeleton-update.py $(ISOLANG)/user $(outdir)
snippet-update:
- $(PYTHON) $(buildscript-dir)/update-snippets.py user $(ISOLANG)/user '*.itely'
+ $(auxscript-dir)/update-snippets.py user $(ISOLANG)/user '*.itely'
DOCUMENTS_INCLUDES:=-I $(ISOLANG)/user \
-I $(top-build-dir)/Documentation/$(ISOLANG)/user/out-www \
endif # ISOLANG
check-xrefs:
- $(PYTHON) $(buildscript-dir)/check_texi_refs.py --batch \
- $(DOCUMENTS_INCLUDES) $(buildscript-dir)/manuals_definitions.py
+ $(auxscript-dir)/check_texi_refs.py --batch \
+ $(DOCUMENTS_INCLUDES) $(auxpython-dir)/manuals_definitions.py
fix-xrefs:
- $(PYTHON) $(buildscript-dir)/check_texi_refs.py --auto-fix \
- $(DOCUMENTS_INCLUDES) $(buildscript-dir)/manuals_definitions.py
+ $(auxscript-dir)/check_texi_refs.py --auto-fix \
+ $(DOCUMENTS_INCLUDES) $(auxpython-dir)/manuals_definitions.py
check-translation:
- $(PYTHON) $(buildscript-dir)/check_translation.py $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES)
+ $(auxscript-dir)/check_translation.py $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES)
update-translation:
- $(PYTHON) $(buildscript-dir)/check_translation.py --update $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES)
+ $(auxscript-dir)/check_translation.py --update $(CHECK_TRANSLATION_FLAGS) $(CHECKED_FILES)
translation-status:
make -C po out=www messages
- $(PYTHON) $(buildscript-dir)/translations-status.py
+ $(auxscript-dir)/translations-status.py
local-help: extra-local-help
where <MY-LANGUAGE> is the ISO 639 language code.
Add a language definition for your language in
-buildscripts/langdefs.py.
+python/langdefs.py.
See next section about what files to translate and the following
detailed instructions after the next section.
TECHNICAL BACKGROUND
A number of Python scripts handle a part of the documentation
-translation process. All are located in buildscripts/, except
-langdefs.py which is in python/
+translation process.
+All scripts used to maintain the translations
+are located in scripts/aux/:
-* buildlib.py -- module containing common functions (read piped output
-of a shell command, use Git)
-* langdefs.py -- language definitions module
* check_translation.py -- show diff to update a translation
* texi-langutils.py -- quickly and dirtily parse Texinfo files to
make message catalogs and Texinfo skeleton files
* texi-skeleton-update.py -- update Texinfo skeleton files
+* update-snippets.py -- synchronize ly snippets with those from
+English docs
+* translations-status.py -- update translations status pages and word
+counts in the file you are reading.
+* tely-gettext.py -- gettext node names, section titles and references
+in the sources; WARNING: only use this script when support for
+"makeinfo --html" has been dropped.
+
+Other scripts are used in the build process, in scripts/build/:
* html-gettext.py -- translate node names, section titles and cross
references in HTML files generated by makeinfo
-* add_html_footer.py (module imported by www_post.py) -- add footer and
-tweak links in HTML pages
* texi-gettext.py -- gettext node names, section titles and references
before calling texi2pdf
* mass-link.py -- link or symlink files between English documentation
and documentation in other languages
-* update-snippets.py -- synchronize ly snippets with those from
-English docs
-* translations-status.py -- update translations status pages and word
-counts in the file you are reading.
+
+Python modules used by scripts in scripts/aux/ or scripts/build/ (but
+not by installed Python scripts) are located in python/aux/:
+* manuals_definitions.py -- define manual names and name of
+cross-reference Texinfo macros
+* buildlib.py -- common functions (read piped output
+of a shell command, use Git)
+* postprocess_html.py (module imported by www_post.py) -- add footer and
+tweak links in HTML pages
+
+And finally
+* python/langdefs.py -- language definitions module
ln -f $< $@
$(outdir)/%.html: %.bib
- BSTINPUTS=$(src-dir) $(PYTHON) $(buildscript-dir)/bib2html.py -o $@ $<
+ BSTINPUTS=$(src-dir) $(buildscript-dir)/bib2html -o $@ $<
local-clean:
rm -f fonts.aux fonts.log feta*.tfm feta*.*pk
ifeq ($(PLATFORM_WINDOWS),yes)
$(outdir)/%.ico: %.xpm
- $(PYTHON) $(buildscript-dir)/genicon.py $< $@
+ $(buildscript-dir)/genicon $< $@
default: $(lilypond-icon) $(ly-icon)
For checking the coverage of the test suite, do the following
@example
-./buildscripts/build-coverage.sh
+./scripts/aux/build-coverage.sh
@emph{# uncovered files, least covered first}
-python ./buildscripts/coverage.py --summary out-cov/*.cc
+./scripts/aux/coverage.py --summary out-cov/*.cc
@emph{# consecutive uncovered lines, longest first}
-python ./buildscripts/coverage.py --uncovered out-cov/*.cc
+./scripts/aux/coverage.py --uncovered out-cov/*.cc
@end example
depth = .
-SUBDIRS = buildscripts python scripts \
+SUBDIRS = python scripts \
flower lily \
mf ly \
tex ps scm \
WEB_TARGETS = offline
WWW-post:
-# need UTF8 setting in case this is hosted on a website.
+# need UTF8 setting in case this is hosted on a website.
echo -e 'AddDefaultCharset utf-8\nAddCharset utf-8 .html\nAddCharset utf-8 .en\nAddCharset utf-8 .nl\nAddCharset utf-8 .txt\n' > $(top-build-dir)/.htaccess
- $(PYTHON) $(buildscript-dir)/mutopia-index.py -o $(outdir)/examples.html input/
+ $(buildscript-dir)/mutopia-index -o $(outdir)/examples.html input/
find $(outdir) -name '*-root' | xargs rm -rf
- $(PYTHON) $(buildscript-dir)/www_post.py $(PACKAGE_NAME) $(TOPLEVEL_VERSION) $(outdir) "$(WEB_TARGETS)"
+ $(buildscript-dir)/www_post $(PACKAGE_NAME) $(TOPLEVEL_VERSION) $(outdir) "$(WEB_TARGETS)"
find $(outdir)/offline-root -type l -delete
@false
grand-replace:
- PATH=$(buildscript-dir)/$(outbase):$(PATH) $(BASH) $(buildscript-dir)/grand-replace.sh
+ $(MAKE) -C scripts/build
+ PATH=$(buildscript-dir):$(PATH) $(buildscript-dir)/grand-replace
################################################################
# testing
local-check: test
rm -rf $(RESULT_DIR)
mkdir -p $(RESULT_DIR)
- $(PYTHON) $(buildscript-dir)/output-distance.py --create-images --output-dir $(RESULT_DIR) input/regression/out-test-baseline input/regression/out-test/
+ $(buildscript-dir)/output-distance --create-images --output-dir $(RESULT_DIR) input/regression/out-test-baseline input/regression/out-test/
@find input ly -name '*.ly' -print |grep -v 'out.*/' | xargs grep '\\version' -L | grep -v "standard input" |sed 's/^/**** Missing version: /g'
user/ User manuals
po/ Translated manual node names
fr/ es/ de/ Docs translated to French, Spanish, German, resp.
- buildscripts/ Scripts for the build process
elisp/ Emacs LilyPond mode and syntax coloring
flower/ A simple C++ library
input/ Music input examples
po/ Translations for binaries and end-user scripts
ps/ PostScript library files
python/ Python modules, MIDI module
+ aux/ Python modules used by maintenance scripts
+ or in the build process
scm/ Scheme sources for LilyPond and subroutine files
scripts/ End-user scripts
+ aux/ Scripts for maintaining the sources and scripts
+ for the build process that need not be built
+ build/ Scripts for the build process that must be built
stepmake/ Generic make subroutine files
tex/ TeX and texinfo library files
vim/ Vi(M) LilyPond mode and syntax coloring
+++ /dev/null
-depth = ..
-
-STEPMAKE_TEMPLATES=script install po
-EXTRA_DIST_FILES=pfx2ttf.fontforge
-
-include $(depth)/make/stepmake.make
-
-# Should we install these? This should be handled by sysadmin or
-# packager but if she forgets...
-#INSTALLATION_OUT_SUFFIXES=1
-#INSTALLATION_OUT_DIR1=$(local_lilypond_datadir)/scripts
-#INSTALLATION_OUT_FILES1=$(outdir)/lilypond-login $(outdir)/lilypond-profile
-
-all: $(INSTALLATION_FILES)
-
+++ /dev/null
-#!@PYTHON@
-import os
-import sys
-import getopt
-import tempfile
-
-# usage:
-def usage ():
- print 'usage: %s [-s style] [-o <outfile>] BIBFILES...'
-
-(options, files) = getopt.getopt (sys.argv[1:], 's:o:', [])
-
-output = 'bib.html'
-style = 'long'
-
-for (o,a) in options:
- if o == '-h' or o == '--help':
- usage ()
- sys.exit (0)
- elif o == '-s' or o == '--style':
- style = a
- elif o == '-o' or o == '--output':
- output = a
- else:
- raise Exception ('unknown option: %s' % o)
-
-
-if style not in ['alpha','index','long','longp','long-pario','short','short-pario','split']:
- sys.stderr.write ("Unknown style \`%s'\n" % style)
-
-tempfile = tempfile.mktemp ('bib2html')
-
-if not files:
- usage ()
- sys.exit (2)
-
-
-def strip_extension (f, ext):
- (p, e) = os.path.splitext (f)
- if e == ext:
- e = ''
- return p + e
-
-nf = []
-for f in files:
- nf.append (strip_extension (f, '.bib'))
-
-files = ','.join (nf)
-
-open (tempfile + '.aux', 'w').write (r'''
-\relax
-\citation{*}
-\bibstyle{html-%(style)s}
-\bibdata{%(files)s}''' % vars ())
-
-cmd = "bibtex %s" % tempfile
-
-sys.stdout.write ("Invoking `%s'\n" % cmd)
-stat = os.system (cmd)
-if stat <> 0:
- sys.exit(1)
-
-
-#TODO: do tex -> html on output
-
-bbl = open (tempfile + '.bbl').read ()
-
-open (output, 'w').write (bbl)
-
-
-def cleanup (tempfile):
- for a in ['aux','bbl', 'blg']:
- os.unlink (tempfile + '.' + a)
-
-cleanup (tempfile)
-
+++ /dev/null
-#!/bin/sh
-
-if test "$1" == "--fresh"; then
- fresh=yes
-fi
-
-if test ! -f config-cov.make; then
- fresh=yes
-fi
-
-if test "$fresh" = "yes";
-then
- ./configure --enable-config=cov --disable-optimising \
- && make conf=cov -j2 clean \
- && perl -i~ -pe 's/-pipe /-fprofile-arcs -ftest-coverage -pipe /g' config-cov.make \
- && perl -i~ -pe 's/ -ldl / -lgcov -ldl /g' config-cov.make
-else
- find -name '*.gcda' -exec rm '{}' ';'
-fi
-
-mkdir -p scripts/out-cov/
-touch scripts/out-cov/midi2ly scripts/out-cov/midi2ly.1
-make conf=cov -j2 && \
- make conf=cov test-clean OUT_TEST=testcov LILYPOND_JOBS= && \
- make conf=cov test OUT_TEST=testcov LILYPOND_JOBS='-dtrace-scheme-coverage '
-
-if test "$?" != "0"; then
- tail -100 out-cov/test-run.log
- exit 1
-fi
-
-depth=../..
-resultdir=out/coverage-results
-
-rm -rf $resultdir
-mkdir $resultdir
-cd $resultdir
-
-ln $depth/lily/* .
-ln $depth/scm/*.scm .
-mv $depth/input/regression/out-testcov/*.scm.cov .
-ln $depth/ly/*.ly .
-ln $depth/lily/out-cov/*[ch] .
-mkdir include
-ln $depth/lily/include/* include/
-ln $depth/flower/include/* include/
-for a in *[cl] *.yy
-do
- gcov -o $depth/lily/out-cov/ -p $a > $a.gcov-summary
-done
-
-python $depth/buildscripts/coverage.py --uncovered *.cc > uncovered.txt
-python $depth/buildscripts/coverage.py --hotspots *.cc > hotspots.txt
-python $depth/buildscripts/coverage.py --summary *.cc > summary.txt
-python $depth/buildscripts/coverage.py --uncovered *.scm > uncovered-scheme.txt
-
-head -20 summary.txt
-
-cat <<EOF
-results in
-
- out/coverage-results/summary.txt
- out/coverage-results/uncovered.txt
- out/coverage-results/uncovered-scheme.txt
- out/coverage-results/hotspots.txt
-
-EOF
+++ /dev/null
-#!/bin/sh
-
-if test "$1" == "--fresh"; then
- fresh=yes
-fi
-
-if test ! -f config-prof.make; then
- fresh=yes
-fi
-
-if test "$fresh" = "yes";
-then
- ./configure --enable-config=prof --enable-optimising \
- && perl -i~ -pe 's/-pipe /-pg -pipe /g' config-prof.make \
- && perl -i~ -pe 's/ -ldl / -pg -ldl /g' config-prof.make
-fi
-
-make conf=prof -j2
-
-if test "$?" != "0"; then
- exit 2
-fi
-
-depth=../..
-resultdir=out/profile-results
-
-rm -rf $resultdir
-mkdir $resultdir
-cd $resultdir
-
-
-cat > long-score.ly << EOF
-\version "2.10.0"
-foo = \new Staff \new Voice \repeat unfold 50 \relative { c4 d8[ d16( e]~ e16[ e e) f] g8 }
-\score {
- \new ChoirStaff <<
- \foo \foo \foo \foo
- \foo \foo \foo \foo
-
- >>
- \midi {}
- \layout {}
-}
-EOF
-
-rm gmon.sum
-
-exe=$depth/out-prof/bin/lilypond
-
-## todo: figure out representative sample.
-files="wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 mozart-hrn-3 mozart-hrn-3 long-score"
-
-
-
-$exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
- -I $depth/input/mutopia/W.A.Mozart/ \
- $files
-
-
-for a in *.profile; do
- echo $a
- cat $a
-done
-
-echo 'running gprof'
-gprof $exe > profile
-
-exit 0
-
-
-## gprof -s takes forever.
-for a in seq 1 3; do
- for f in $files ; do
- $exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
- -I $depth/input/mutopia/W.A.Mozart/ \
- $f
-
- echo 'running gprof'
- if test -f gmon.sum ; then
- gprof -s $exe gmon.out gmon.sum
- else
- mv gmon.out gmon.sum
- fi
- done
-done
-
-gprof $exe gmon.sum > profile
+++ /dev/null
-#!@PYTHON@
-
-import subprocess
-import re
-import sys
-
-verbose = False
-
-def read_pipe (command):
- child = subprocess.Popen (command,
- stdout = subprocess.PIPE,
- stderr = subprocess.PIPE,
- shell = True)
- (output, error) = child.communicate ()
- code = str (child.wait ())
- if not child.stdout or child.stdout.close ():
- print "pipe failed: %(command)s" % locals ()
- if code != '0':
- error = code + ' ' + error
- return (output, error)
-
-revision_re = re.compile ('GIT [Cc]ommittish:\s+([a-f0-9]+)')
-vc_diff_cmd = 'git diff %(color_flag)s %(revision)s HEAD -- %(original)s | cat'
-
-def check_translated_doc (original, translated_file, translated_contents, color=False):
- m = revision_re.search (translated_contents)
- if not m:
- sys.stderr.write ('error: ' + translated_file + \
- ": no 'GIT committish: <hash>' found.\nPlease check " + \
- 'the whole file against the original in English, then ' + \
- 'fill in HEAD committish in the header.\n')
- sys.exit (1)
- revision = m.group (1)
-
- if color:
- color_flag = '--color'
- else:
- color_flag = '--no-color'
- c = vc_diff_cmd % vars ()
- if verbose:
- sys.stderr.write ('running: ' + c)
- return read_pipe (c)
+++ /dev/null
-#!@PYTHON@
-
-import sys
-import midi
-
-(h,tracks) = midi.parse (open (sys.argv[1]).read ())
-
-tracks = tracks[1:]
-
-for t in tracks:
- for e in t:
- print e
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-check_texi_refs.py
-Interactive Texinfo cross-references checking and fixing tool
-
-"""
-
-
-import sys
-import re
-import os
-import optparse
-import imp
-
-outdir = 'out-www'
-
-log = sys.stderr
-stdout = sys.stdout
-
-file_not_found = 'file not found in include path'
-
-warn_not_fixed = '*** Warning: this broken x-ref has not been fixed!\n'
-
-opt_parser = optparse.OptionParser (usage='check_texi_refs.py [OPTION]... FILE',
- description='''Check and fix \
-cross-references in a collection of Texinfo
-documents heavily cross-referenced each other.
-''')
-
-opt_parser.add_option ('-a', '--auto-fix',
- help="Automatically fix cross-references whenever \
-it is possible",
- action='store_true',
- dest='auto_fix',
- default=False)
-
-opt_parser.add_option ('-b', '--batch',
- help="Do not run interactively",
- action='store_false',
- dest='interactive',
- default=True)
-
-opt_parser.add_option ('-c', '--check-comments',
- help="Also check commented out x-refs",
- action='store_true',
- dest='check_comments',
- default=False)
-
-opt_parser.add_option ('-p', '--check-punctuation',
- help="Check punctuation after x-refs",
- action='store_true',
- dest='check_punctuation',
- default=False)
-
-opt_parser.add_option ("-I", '--include', help="add DIR to include path",
- metavar="DIR",
- action='append', dest='include_path',
- default=[os.path.abspath (os.getcwd ())])
-
-(options, files) = opt_parser.parse_args ()
-
-class InteractionError (Exception):
- pass
-
-
-manuals_defs = imp.load_source ('manuals_defs', files[0])
-manuals = {}
-
-def find_file (name, prior_directory='.'):
- p = os.path.join (prior_directory, name)
- out_p = os.path.join (prior_directory, outdir, name)
- if os.path.isfile (p):
- return p
- elif os.path.isfile (out_p):
- return out_p
-
- # looking for file in include_path
- for d in options.include_path:
- p = os.path.join (d, name)
- if os.path.isfile (p):
- return p
-
- # file not found in include_path: looking in `outdir' subdirs
- for d in options.include_path:
- p = os.path.join (d, outdir, name)
- if os.path.isfile (p):
- return p
-
- raise EnvironmentError (1, file_not_found, name)
-
-
-exit_code = 0
-
-def set_exit_code (n):
- global exit_code
- exit_code = max (exit_code, n)
-
-
-if options.interactive:
- try:
- import readline
- except:
- pass
-
- def yes_prompt (question, default=False, retries=3):
- d = {True: 'y', False: 'n'}.get (default, False)
- while retries:
- a = raw_input ('%s [default: %s]' % (question, d) + '\n')
- if a.lower ().startswith ('y'):
- return True
- if a.lower ().startswith ('n'):
- return False
- if a == '' or retries < 0:
- return default
- stdout.write ("Please answer yes or no.\n")
- retries -= 1
-
- def search_prompt ():
- """Prompt user for a substring to look for in node names.
-
-If user input is empty or matches no node name, return None,
-otherwise return a list of (manual, node name, file) tuples.
-
-"""
- substring = raw_input ("Enter a substring to search in node names \
-(press Enter to skip this x-ref):\n")
- if not substring:
- return None
- substring = substring.lower ()
- matches = []
- for k in manuals:
- matches += [(k, node, manuals[k]['nodes'][node][0])
- for node in manuals[k]['nodes']
- if substring in node.lower ()]
- return matches
-
-else:
- def yes_prompt (question, default=False, retries=3):
- return default
-
- def search_prompt ():
- return None
-
-
-ref_re = re.compile \
- ('@(ref|ruser|rlearning|rprogram|rglos)(?:\\{(?P<ref>[^,\\\\\\}]+?)|\
-named\\{(?P<refname>[^,\\\\]+?),(?P<display>[^,\\\\\\}]+?))\\}(?P<last>.)',
- re.DOTALL)
-node_include_re = re.compile (r'(?m)^@(node|include)\s+(.+?)$')
-
-whitespace_re = re.compile (r'\s+')
-line_start_re = re.compile ('(?m)^')
-
-def which_line (index, newline_indices):
- """Calculate line number of a given string index
-
-Return line number of string index index, where
-newline_indices is an ordered iterable of all newline indices.
-"""
- inf = 0
- sup = len (newline_indices) - 1
- n = len (newline_indices)
- while inf + 1 != sup:
- m = (inf + sup) / 2
- if index >= newline_indices [m]:
- inf = m
- else:
- sup = m
- return inf + 1
-
-
-comments_re = re.compile ('(?<!@)(@c(?:omment)? \
-.*?\\n|^@ignore\\n.*?\\n@end ignore\\n)', re.M | re.S)
-
-def calc_comments_boundaries (texinfo_doc):
- return [(m.start (), m.end ()) for m in comments_re.finditer (texinfo_doc)]
-
-
-def is_commented_out (start, end, comments_boundaries):
- for k in range (len (comments_boundaries)):
- if (start > comments_boundaries[k][0]
- and end <= comments_boundaries[k][1]):
- return True
- elif end <= comments_boundaries[k][0]:
- return False
- return False
-
-
-def read_file (f, d):
- s = open (f).read ()
- base = os.path.basename (f)
- dir = os.path.dirname (f)
-
- d['contents'][f] = s
-
- d['newline_indices'][f] = [m.end () for m in line_start_re.finditer (s)]
- if options.check_comments:
- d['comments_boundaries'][f] = []
- else:
- d['comments_boundaries'][f] = calc_comments_boundaries (s)
-
- for m in node_include_re.finditer (s):
- if m.group (1) == 'node':
- line = which_line (m.start (), d['newline_indices'][f])
- d['nodes'][m.group (2)] = (f, line)
-
- elif m.group (1) == 'include':
- try:
- p = find_file (m.group (2), dir)
- except EnvironmentError, (errno, strerror):
- if strerror == file_not_found:
- continue
- else:
- raise
- read_file (p, d)
-
-
-def read_manual (name):
- """Look for all node names and cross-references in a Texinfo document
-
-Return a (manual, dictionary) tuple where manual is the cross-reference
-macro name defined by references_dict[name], and dictionary
-has the following keys:
-
- 'nodes' is a dictionary of `node name':(file name, line number),
-
- 'contents' is a dictionary of file:`full file contents',
-
- 'newline_indices' is a dictionary of
-file:[list of beginning-of-line string indices],
-
- 'comments_boundaries' is a list of (start, end) tuples,
-which contain string indices of start and end of each comment.
-
-Included files that can be found in the include path are processed too.
-
-"""
- d = {}
- d['nodes'] = {}
- d['contents'] = {}
- d['newline_indices'] = {}
- d['comments_boundaries'] = {}
- manual = manuals_defs.references_dict.get (name, '')
- try:
- f = find_file (name + '.tely')
- except EnvironmentError, (errno, strerror):
- if not strerror == file_not_found:
- raise
- else:
- try:
- f = find_file (name + '.texi')
- except EnvironmentError, (errno, strerror):
- if strerror == file_not_found:
- sys.stderr.write (name + '.{texi,tely}: ' +
- file_not_found + '\n')
- return (manual, d)
- else:
- raise
-
- log.write ("Processing manual %s (%s)\n" % (f, manual))
- read_file (f, d)
- return (manual, d)
-
-
-log.write ("Reading files...\n")
-
-manuals = dict ([read_manual (name)
- for name in manuals_defs.references_dict.keys ()])
-
-ref_fixes = set ()
-bad_refs_count = 0
-fixes_count = 0
-
-def add_fix (old_type, old_ref, new_type, new_ref):
- ref_fixes.add ((old_type, old_ref, new_type, new_ref))
-
-
-def lookup_fix (r):
- found = []
- for (old_type, old_ref, new_type, new_ref) in ref_fixes:
- if r == old_ref:
- found.append ((new_type, new_ref))
- return found
-
-
-def preserve_linebreak (text, linebroken):
- if linebroken:
- if ' ' in text:
- text = text.replace (' ', '\n', 1)
- n = ''
- else:
- n = '\n'
- else:
- n = ''
- return (text, n)
-
-
-def choose_in_numbered_list (message, string_list, sep=' ', retries=3):
- S = set (string_list)
- S.discard ('')
- string_list = list (S)
- numbered_list = sep.join ([str (j + 1) + '. ' + string_list[j]
- for j in range (len (string_list))]) + '\n'
- t = retries
- while t > 0:
- value = ''
- stdout.write (message +
- "(press Enter to discard and start a new search)\n")
- input = raw_input (numbered_list)
- if not input:
- return ''
- try:
- value = string_list[int (input) - 1]
- except IndexError:
- stdout.write ("Error: index number out of range\n")
- except ValueError:
- matches = [input in v for v in string_list]
- n = matches.count (True)
- if n == 0:
- stdout.write ("Error: input matches no item in the list\n")
- elif n > 1:
- stdout.write ("Error: ambiguous input (matches several items \
-in the list)\n")
- else:
- value = string_list[matches.index (True)]
- if value:
- return value
- t -= 1
- raise InteractionError ("%d retries limit exceeded" % retries)
-
-refs_count = 0
-
-def check_ref (manual, file, m):
- global fixes_count, bad_refs_count, refs_count
- refs_count += 1
- bad_ref = False
- fixed = True
- type = m.group (1)
- original_name = m.group ('ref') or m.group ('refname')
- name = whitespace_re.sub (' ', original_name). strip ()
- newline_indices = manuals[manual]['newline_indices'][file]
- line = which_line (m.start (), newline_indices)
- linebroken = '\n' in original_name
- original_display_name = m.group ('display')
- next_char = m.group ('last')
- if original_display_name: # the xref has an explicit display name
- display_linebroken = '\n' in original_display_name
- display_name = whitespace_re.sub (' ', original_display_name). strip ()
- commented_out = is_commented_out \
- (m.start (), m.end (), manuals[manual]['comments_boundaries'][file])
- useful_fix = not outdir in file
-
- # check puncuation after x-ref
- if options.check_punctuation and not next_char in '.,;:!?':
- stdout.write ("Warning: %s: %d: `%s': x-ref \
-not followed by punctuation\n" % (file, line, name))
-
- # validate xref
- explicit_type = type
- new_name = name
-
- if type != 'ref' and type == manual and not commented_out:
- if useful_fix:
- fixed = False
- bad_ref = True
- stdout.write ("\n%s: %d: `%s': external %s x-ref should be internal\n"
- % (file, line, name, type))
- if options.auto_fix or yes_prompt ("Fix this?"):
- type = 'ref'
-
- if type == 'ref':
- explicit_type = manual
-
- if not name in manuals[explicit_type]['nodes'] and not commented_out:
- bad_ref = True
- fixed = False
- stdout.write ('\n')
- if type == 'ref':
- stdout.write ("\e[1;31m%s: %d: `%s': wrong internal x-ref\e[0m\n"
- % (file, line, name))
- else:
- stdout.write ("\e[1;31m%s: %d: `%s': wrong external `%s' x-ref\e[0m\n"
- % (file, line, name, type))
- # print context
- stdout.write ('--\n' + manuals[manual]['contents'][file]
- [newline_indices[max (0, line - 2)]:
- newline_indices[min (line + 3,
- len (newline_indices) - 1)]] +
- '--\n')
-
- # try to find the reference in other manuals
- found = []
- for k in [k for k in manuals if k != explicit_type]:
- if name in manuals[k]['nodes']:
- if k == manual:
- found = ['ref']
- stdout.write ("\e[1;32m found as internal x-ref\e[0m\n")
- break
- else:
- found.append (k)
- stdout.write ("\e[1;32m found as `%s' x-ref\e[0m\n" % k)
-
- if (len (found) == 1
- and (options.auto_fix or yes_prompt ("Fix this x-ref?"))):
- add_fix (type, name, found[0], name)
- type = found[0]
- fixed = True
-
- elif len (found) > 1 and useful_fix:
- if options.interactive or options.auto_fix:
- stdout.write ("* Several manuals contain this node name, \
-cannot determine manual automatically.\n")
- if options.interactive:
- t = choose_in_numbered_list ("Choose manual for this x-ref by \
-index number or beginning of name:\n", found)
- if t:
- add_fix (type, name, t, name)
- type = t
- fixed = True
-
- if not fixed:
- # try to find a fix already made
- found = lookup_fix (name)
-
- if len (found) == 1:
- stdout.write ("Found one previous fix: %s `%s'\n" % found[0])
- if options.auto_fix or yes_prompt ("Apply this fix?"):
- type, new_name = found[0]
- fixed = True
-
- elif len (found) > 1:
- if options.interactive or options.auto_fix:
- stdout.write ("* Several previous fixes match \
-this node name, cannot fix automatically.\n")
- if options.interactive:
- concatened = choose_in_numbered_list ("Choose new manual \
-and x-ref by index number or beginning of name:\n", [''.join ([i[0], ' ', i[1]])
- for i in found],
- sep='\n')
- if concatened:
- type, new_name = concatenated.split (' ', 1)
- fixed = True
-
- if not fixed:
- # all previous automatic fixing attempts failed,
- # ask user for substring to look in node names
- while True:
- node_list = search_prompt ()
- if node_list == None:
- if options.interactive:
- stdout.write (warn_not_fixed)
- break
- elif not node_list:
- stdout.write ("No matched node names.\n")
- else:
- concatenated = choose_in_numbered_list ("Choose \
-node name and manual for this x-ref by index number or beginning of name:\n", \
- [' '.join ([i[0], i[1], '(in %s)' % i[2]])
- for i in node_list],
- sep='\n')
- if concatenated:
- t, z = concatenated.split (' ', 1)
- new_name = z.split (' (in ', 1)[0]
- add_fix (type, name, t, new_name)
- type = t
- fixed = True
- break
-
- if fixed and type == manual:
- type = 'ref'
- bad_refs_count += int (bad_ref)
- if bad_ref and not useful_fix:
- stdout.write ("*** Warning: this file is automatically generated, \
-please fix the code source instead of generated documentation.\n")
-
- # compute returned string
- if new_name == name:
- if bad_ref and (options.interactive or options.auto_fix):
- # only the type of the ref was fixed
- fixes_count += int (fixed)
- if original_display_name:
- return ('@%snamed{%s,%s}' % (type, original_name, original_display_name)) + next_char
- else:
- return ('@%s{%s}' % (type, original_name)) + next_char
- else:
- fixes_count += int (fixed)
- (ref, n) = preserve_linebreak (new_name, linebroken)
- if original_display_name:
- if bad_ref:
- stdout.write ("Current display name is `%s'\n")
- display_name = raw_input \
- ("Enter a new display name or press enter to keep the existing name:\n") \
- or display_name
- (display_name, n) = preserve_linebreak (display_name, display_linebroken)
- else:
- display_name = original_display_name
- return ('@%snamed{%s,%s}' % (type, ref, display_name)) + \
- next_char + n
- else:
- return ('@%s{%s}' % (type, ref)) + next_char + n
-
-
-log.write ("Checking cross-references...\n")
-
-try:
- for key in manuals:
- for file in manuals[key]['contents']:
- s = ref_re.sub (lambda m: check_ref (key, file, m),
- manuals[key]['contents'][file])
- if s != manuals[key]['contents'][file]:
- open (file, 'w').write (s)
-except KeyboardInterrupt:
- log.write ("Operation interrupted, exiting.\n")
- sys.exit (2)
-except InteractionError, instance:
- log.write ("Operation refused by user: %s\nExiting.\n" % instance)
- sys.exit (3)
-
-log.write ("\e[1;36mDone: %d x-refs found, %d bad x-refs found, fixed %d.\e[0m\n" %
- (refs_count, bad_refs_count, fixes_count))
+++ /dev/null
-#!/usr/bin/env python
-
-import __main__
-import optparse
-import os
-import sys
-
-import langdefs
-import buildlib
-
-verbose = 0
-use_colors = False
-lang = 'C'
-C = lang
-
-def dir_lang (file, lang, lang_dir_index):
- path_components = file.split ('/')
- path_components[lang_dir_index] = lang
- return os.path.join (*path_components)
-
-def do_file (file_name, lang_codes, buildlib):
- if verbose:
- sys.stderr.write ('%s...\n' % file_name)
- split_file_name = file_name.split ('/')
- d1, d2 = split_file_name[0:2]
- if d1 in lang_codes:
- check_lang = d1
- lang_dir_index = 0
- elif d2 in lang_codes:
- check_lang = d2
- lang_dir_index = 1
- else:
- check_lang = lang
- if check_lang == C:
- raise Exception ('cannot determine language for ' + file_name)
-
- original = dir_lang (file_name, '', lang_dir_index)
- translated_contents = open (file_name).read ()
- (diff_string, error) \
- = buildlib.check_translated_doc (original,
- file_name,
- translated_contents,
- color=use_colors and not update_mode)
-
- if error:
- sys.stderr.write ('warning: %s: %s' % (file_name, error))
-
- if update_mode:
- if error or len (diff_string) >= os.path.getsize (original):
- buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + original)
- elif diff_string:
- diff_file = original + '.diff'
- f = open (diff_file, 'w')
- f.write (diff_string)
- f.close ()
- buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + diff_file)
- os.remove (diff_file)
- else:
- sys.stdout.write (diff_string)
-
-def usage ():
- sys.stdout.write (r'''
-Usage:
-check-translation [--language=LANG] [--verbose] [--update] FILE...
-
-This script is licensed under the GNU GPL.
-''')
-
-def do_options ():
- global lang, verbose, update_mode, use_colors
-
- p = optparse.OptionParser (usage="check-translation [--language=LANG] [--verbose] FILE...",
- description="This script is licensed under the GNU GPL.")
- p.add_option ("--language",
- action='store',
- default='site',
- dest="language")
- p.add_option ("--no-color",
- action='store_false',
- default=True,
- dest="color",
- help="do not print ANSI-cooured output")
- p.add_option ("--verbose",
- action='store_true',
- default=False,
- dest="verbose",
- help="print details, including executed shell commands")
- p.add_option ('-u', "--update",
- action='store_true',
- default=False,
- dest='update_mode',
- help='call $EDITOR to update the translation')
-
- (options, files) = p.parse_args ()
- verbose = options.verbose
- lang = options.language
- use_colors = options.color
- update_mode = options.update_mode
-
- return files
-
-def main ():
- global update_mode, text_editor
-
- files = do_options ()
- if 'EDITOR' in os.environ:
- text_editor = os.environ['EDITOR']
- else:
- update_mode = False
-
- buildlib.verbose = verbose
-
- for i in files:
- do_file (i, langdefs.LANGDICT.keys (), buildlib)
-
-if __name__ == '__main__':
- main ()
+++ /dev/null
-#!/usr/bin/python
-
-import os
-import glob
-import re
-import sys
-import optparse
-
-#File 'accidental-engraver.cc'
-#Lines executed:87.70% of 252
-
-def summary (args):
- results = []
- for f in args:
- str = open (f).read ()
- m = re.search ("File '([^']+.cc)'\s*Lines executed:([0-9.]+)% of ([0-9]+)", str)
-
- if m and '/usr/lib' in m.group (1):
- continue
-
- if m:
- cov = float (m.group (2))
- lines = int (m.group (3))
- pain = lines * (100.0 - cov)
- file = m.group (1)
- tup = (pain, locals ().copy())
-
- results.append(tup)
-
- results.sort ()
- results.reverse()
-
- print 'files sorted by number of untested lines (decreasing)'
- print
- print '%5s (%6s): %s' % ('cov %', 'lines', 'file')
- print '----------------------------------------------'
-
- for (pain, d) in results:
- print '%(cov)5.2f (%(lines)6d): %(file)s' % d
-
-class Chunk:
- def __init__ (self, range, coverage_count, all_lines, file):
- assert coverage_count >= 0
- assert type (range) == type (())
-
- self.coverage_count = coverage_count
- self.range = range
- self.all_lines = all_lines
- self.file = file
-
- def length (self):
- return self.range[1] - self.range[0]
-
- def text (self):
- return ''.join ([l[2] for l in self.lines()])
-
- def lines (self):
- return self.all_lines[self.range[0]:
- self.range[1]]
- def widen (self):
- self.range = (min (self.range[0] -1, 0),
- self.range[0] +1)
- def write (self):
- print 'chunk in', self.file
- for (c, n, l) in self.lines ():
- cov = '%d' % c
- if c == 0:
- cov = '#######'
- elif c < 0:
- cov = ''
- sys.stdout.write ('%8s:%8d:%s' % (cov, n, l))
-
- def uncovered_score (self):
- return self.length ()
-
-class SchemeChunk (Chunk):
- def uncovered_score (self):
- text = self.text ()
- if (text.startswith ('(define ')
- and not text.startswith ('(define (')):
- return 0
-
- if text.startswith ('(use-modules '):
- return 0
-
- if (text.startswith ('(define-public ')
- and not text.startswith ('(define-public (')):
- return 0
-
- return len ([l for (c,n,l) in self.lines() if (c == 0)])
-
-def read_gcov (f):
- ls = []
-
- in_lines = [l for l in open (f).readlines ()]
- (count_len, line_num_len) = tuple (map (len, in_lines[0].split (':')[:2]))
-
- for l in in_lines:
- c = l[:count_len].strip ()
- l = l[count_len+1:]
- n = int (l[:line_num_len].strip ())
-
- if n == 0:
- continue
-
- if '#' in c:
- c = 0
- elif c == '-':
- c = -1
- else:
- c = int (c)
-
- l = l[line_num_len+1:]
-
- ls.append ((c,n,l))
-
- return ls
-
-def get_c_chunks (ls, file):
- chunks = []
- chunk = []
-
- last_c = -1
- for (c, n, l) in ls:
- if not (c == last_c or c < 0 and l != '}\n'):
- if chunk and last_c >= 0:
- nums = [n-1 for (n, l) in chunk]
- chunks.append (Chunk ((min (nums), max (nums)+1),
- last_c, ls, file))
- chunk = []
-
- chunk.append ((n,l))
- if c >= 0:
- last_c = c
-
- return chunks
-
-def get_scm_chunks (ls, file):
- chunks = []
- chunk = []
-
- def new_chunk ():
- if chunk:
- nums = [n-1 for (n, l) in chunk]
- chunks.append (SchemeChunk ((min (nums), max (nums)+1),
- max (last_c, 0), ls, file))
- chunk[:] = []
-
- last_c = -1
- for (cov_count, line_number, line) in ls:
- if line.startswith ('('):
- new_chunk ()
- last_c = -1
-
- chunk.append ((line_number, line))
- if cov_count >= 0:
- last_c = cov_count
-
- return chunks
-
-def widen_chunk (ch, ls):
- a -= 1
- b += 1
-
- return [(n, l) for (c, n, l) in ls[a:b]]
-
-
-def extract_chunks (file):
- try:
- ls = read_gcov (file)
- except IOError, s :
- print s
- return []
-
- cs = []
- if 'scm' in file:
- cs = get_scm_chunks (ls, file)
- else:
- cs = get_c_chunks (ls, file)
- return cs
-
-
-def filter_uncovered (chunks):
- def interesting (c):
- if c.coverage_count > 0:
- return False
-
- t = c.text()
- for stat in ('warning', 'error', 'print', 'scm_gc_mark'):
- if stat in t:
- return False
- return True
-
- return [c for c in chunks if interesting (c)]
-
-
-def main ():
- p = optparse.OptionParser (usage="usage coverage.py [options] files",
- description="")
- p.add_option ("--summary",
- action='store_true',
- default=False,
- dest="summary")
-
- p.add_option ("--hotspots",
- default=False,
- action='store_true',
- dest="hotspots")
-
- p.add_option ("--uncovered",
- default=False,
- action='store_true',
- dest="uncovered")
-
-
- (options, args) = p.parse_args ()
-
-
- if options.summary:
- summary (['%s.gcov-summary' % s for s in args])
-
- if options.uncovered or options.hotspots:
- chunks = []
- for a in args:
- name = a
- if name.endswith ('scm'):
- name += '.cov'
- else:
- name += '.gcov'
-
- chunks += extract_chunks (name)
-
- if options.uncovered:
- chunks = filter_uncovered (chunks)
- chunks = [(c.uncovered_score (), c) for c in chunks if c.uncovered_score() > 0]
- elif options.hotspots:
- chunks = [((c.coverage_count, -c.length()), c) for c in chunks]
-
-
- chunks.sort ()
- chunks.reverse ()
- for (score, c) in chunks:
- c.write ()
-
-
-
-if __name__ == '__main__':
- main ()
+++ /dev/null
-#!@PYTHON@
-# -*- coding: utf-8 -*-
-# extract_texi_filenames.py
-
-# USAGE: extract_texi_filenames.py [-o OUTDIR] FILES
-#
-# -o OUTDIR specifies that output files should rather be written in OUTDIR
-#
-# Description:
-# This script parses the .texi file given and creates a file with the
-# nodename <=> filename/anchor map.
-# The idea behind: Unnumbered subsections go into the same file as the
-# previous numbered section, @translationof gives the original node name,
-# which is then used for the filename/anchor.
-#
-# If this script is run on a file texifile.texi, it produces a file
-# texifile[.LANG].xref-map with tab-separated entries of the form
-# NODE\tFILENAME\tANCHOR
-# LANG is the document language in case it's not 'en'
-# Note: The filename does not have any extension appended!
-# This file can then be used by our texi2html init script to determine
-# the correct file name and anchor for external refs
-
-import sys
-import re
-import os
-import getopt
-
-optlist, args = getopt.getopt (sys.argv[1:],'o:')
-files = args
-
-outdir = '.'
-for x in optlist:
- if x[0] == '-o':
- outdir = x[1]
-
-if not os.path.isdir (outdir):
- if os.path.exists (outdir):
- os.unlink (outdir)
- os.makedirs (outdir)
-
-include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M)
-whitespaces = re.compile (r'\s+')
-section_translation_re = re.compile ('^@(node|(?:unnumbered|appendix)\
-(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|\
-(?:major|chap|(?:sub){0,2})heading|translationof) (.*?)\\s*$', re.MULTILINE)
-
-def expand_includes (m, filename):
- filepath = os.path.join (os.path.dirname (filename), m.group(1)) + '.texi'
- if os.path.exists (filepath):
- return extract_sections (filepath)[1]
- else:
- print "Unable to locate include file " + filepath
- return ''
-
-lang_re = re.compile (r'^@documentlanguage (.+)', re.M)
-
-def extract_sections (filename):
- result = ''
- f = open (filename, 'r')
- page = f.read ()
- f.close()
- # Search document language
- m = lang_re.search (page)
- if m and m.group (1) != 'en':
- lang_suffix = '.' + m.group (1)
- else:
- lang_suffix = ''
- # Replace all includes by their list of sections and extract all sections
- page = include_re.sub (lambda m: expand_includes (m, filename), page)
- sections = section_translation_re.findall (page)
- for sec in sections:
- result += "@" + sec[0] + " " + sec[1] + "\n"
- return (lang_suffix, result)
-
-# Convert a given node name to its proper file name (normalization as explained
-# in the texinfo manual:
-# http://www.gnu.org/software/texinfo/manual/texinfo/html_node/HTML-Xref-Node-Name-Expansion.html
-def texinfo_file_name(title):
- # exception: The top node is always mapped to index.html
- if title == "Top":
- return "index"
- # File name normalization by texinfo (described in the texinfo manual):
- # 1/2: letters and numbers are left unchanged
- # 3/4: multiple, leading and trailing whitespace is removed
- title = title.strip ();
- title = whitespaces.sub (' ', title)
- # 5: all remaining spaces are converted to '-'
- # 6: all other 7- or 8-bit chars are replaced by _xxxx (xxxx=ascii character code)
- result = ''
- for index in range(len(title)):
- char = title[index]
- if char == ' ': # space -> '-'
- result += '-'
- elif ( ('0' <= char and char <= '9' ) or
- ('A' <= char and char <= 'Z' ) or
- ('a' <= char and char <= 'z' ) ): # number or letter
- result += char
- else:
- ccode = ord(char)
- if ccode <= 0xFFFF:
- result += "_%04x" % ccode
- else:
- result += "__%06x" % ccode
- # 7: if name begins with number, prepend 't_g' (so it starts with a letter)
- if (result != '') and (ord(result[0]) in range (ord('0'), ord('9'))):
- result = 't_g' + result
- return result
-
-texinfo_re = re.compile (r'@.*{(.*)}')
-def remove_texinfo (title):
- return texinfo_re.sub (r'\1', title)
-
-def create_texinfo_anchor (title):
- return texinfo_file_name (remove_texinfo (title))
-
-unnumbered_re = re.compile (r'unnumbered.*')
-def process_sections (filename, lang_suffix, page):
- sections = section_translation_re.findall (page)
- basename = os.path.splitext (os.path.basename (filename))[0]
- p = os.path.join (outdir, basename) + lang_suffix + '.xref-map'
- f = open (p, 'w')
-
- this_title = ''
- this_filename = 'index'
- this_anchor = ''
- this_unnumbered = False
- had_section = False
- for sec in sections:
- if sec[0] == "node":
- # Write out the cached values to the file and start a new section:
- if this_title != '' and this_title != 'Top':
- f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
- had_section = False
- this_title = remove_texinfo (sec[1])
- this_anchor = create_texinfo_anchor (sec[1])
- elif sec[0] == "translationof":
- anchor = create_texinfo_anchor (sec[1])
- # If @translationof is used, it gives the original node name, which
- # we use for the anchor and the file name (if it is a numbered node)
- this_anchor = anchor
- if not this_unnumbered:
- this_filename = anchor
- else:
- # Some pages might not use a node for every section, so treat this
- # case here, too: If we already had a section and encounter enother
- # one before the next @node, we write out the old one and start
- # with the new values
- if had_section and this_title != '':
- f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
- this_title = remove_texinfo (sec[1])
- this_anchor = create_texinfo_anchor (sec[1])
- had_section = True
-
- # unnumbered nodes use the previously used file name, only numbered
- # nodes get their own filename! However, top-level @unnumbered
- # still get their own file.
- this_unnumbered = unnumbered_re.match (sec[0])
- if not this_unnumbered or sec[0] == "unnumbered":
- this_filename = this_anchor
-
- if this_title != '' and this_title != 'Top':
- f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
- f.close ()
-
-
-for filename in files:
- print "extract_texi_filenames.py: Processing %s" % filename
- (lang_suffix, sections) = extract_sections (filename)
- process_sections (filename, lang_suffix, sections)
+++ /dev/null
-#!/usr/bin/python
-import sys
-import re
-import os
-
-
-full_paths = {}
-incs = {}
-inc_re = re.compile ('^#include "([^"]+)"')
-def parse_file (fn):
- lst = []
-
- lc = 0
- for l in open (fn).readlines():
- lc += 1
- m = inc_re.search (l)
- if m:
- lst.append ((lc, m.group (1)))
-
- base = os.path.split (fn)[1]
- full_paths[base] = fn
- incs[base] = lst
-
-
-def has_include (f, name):
- try:
- return name in [b for (a,b) in incs[f]]
- except KeyError:
- return False
-
-for a in sys.argv:
- parse_file (a)
-
-print '-*-compilation-*-'
-for (f, lst) in incs.items ():
- for (n, inc) in lst:
- for (n2, inc2) in lst:
- if has_include (inc2, inc):
- print "%s:%d: already have %s from %s" % (full_paths[f], n,
- inc, inc2)
- break
-
-
-
+++ /dev/null
-#!/usr/bin/python
-
-# fixcc -- nitpick lily's c++ code
-
-# TODO
-# * maintainable rules: regexp's using whitespace (?x) and match names
-# <identifier>)
-# * trailing `*' vs. function definition
-# * do not break/change indentation of fixcc-clean files
-# * check lexer, parser
-# * rewrite in elisp, add to cc-mode
-# * using regexes is broken by design
-# * ?
-# * profit
-
-import __main__
-import getopt
-import os
-import re
-import string
-import sys
-import time
-
-COMMENT = 'COMMENT'
-STRING = 'STRING'
-GLOBAL_CXX = 'GC++'
-CXX = 'C++'
-verbose_p = 0
-indent_p = 0
-
-rules = {
- GLOBAL_CXX:
- [
- # delete gratuitous block
- ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
- '\n\\2;'),
- ],
- CXX:
- [
- # space before parenthesis open
- ('([^\( \]])[ \t]*\(', '\\1 ('),
- # space after comma
- ("\([^'],\)[ \t]*", '\1 '),
- # delete gratuitous block
- ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
- '\n\\2;'),
- # delete inline tabs
- ('(\w)\t+', '\\1 '),
- # delete inline double spaces
- (' *', ' '),
- # delete space after parenthesis open
- ('\([ \t]*', '('),
- # delete space before parenthesis close
- ('[ \t]*\)', ')'),
- # delete spaces after prefix
- ('(--|\+\+)[ \t]*([\w\)])', '\\1\\2'),
- # delete spaces before postfix
- ('([\w\)\]])[ \t]*(--|\+\+)', '\\1\\2'),
- # delete space after parenthesis close
- #('\)[ \t]*([^\w])', ')\\1'),
- # delete space around operator
- # ('([\w\(\)\]])([ \t]*)(::|\.)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
- ('([\w\(\)\]])([ \t]*)(\.|->)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
- # delete space after operator
- ('(::)([ \t]*)([\w\(\)])', '\\1\\3'),
- # delete superflous space around operator
- ('([\w\(\)\]])([ \t]+)(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&|\||\*)([ \t]+)([\w\(\)])', '\\1 \\3 \\5'),
- # space around operator1
- ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
- # space around operator2
- ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) ([^\w\s])', '\\1 \\2 \\3'),
- # space around operator3
- ('([^\w\s]) (&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|[^-]>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
- # space around operator4
- ('([\w\(\)\]]) (\*|/|\+|-) *([-:])', '\\1 \\2 \\3'),
- # space around +/-; exponent
- ('([\w\)\]])(\+|-)([_A-Za-z\(])', '\\1 \\2 \\3'),
- ('([_\dA-Za-df-z\)\]])(\+|-)([\w\(])', '\\1 \\2 \\3'),
- # trailing operator
- (' (::|&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&XXX|\||\*XXX)[ \t]*\n([ \t]*)', '\n\\2\\1 '),
- # pointer
- ##('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
- ('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|:|=|\?\)|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
- #to#('(bool|char|const|delete|int|stream|unsigned|void|([A-Z]\w*)|[,])[ \n\t]*(\*|&)[ \t]*', '\\1 \\3'),
- # pointer with template
- ('(( *((bool|char|const|delete|int|stream|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*|\w+::\w+|[,])[ \*&],*)+)>) *(\*|&) *', '\\1 \\5'),
- #to#('(( *((bool|char|delete|int|stream|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)|[,])[ \*&],*)+)>)[ \t\n]*(\*|&) *', '\\1 \\7'),
- # unary pointer, minus, not
- ('(return|=) (\*|&|-|!) ([\w\(])', '\\1 \\2\\3'),
- # space after `operator'
- ('(\Woperator) *([^\w\s])', '\\1 \\2'),
- # dangling brace close
- ('\n[ \t]*(\n[ \t]*})', '\\1'),
- # dangling newline
- ('\n[ \t]*\n[ \t]*\n', '\n\n'),
- # dangling parenthesis open
- #('[ \t]*\n[ \t]*\([ \t]*\n', '('),
- ('\([ \t]*\n', '('),
- # dangling parenthesis close
- ('\n[ \t]*\)', ')'),
- # dangling comma
- ('\n[ \t]*,', ','),
- # dangling semicolon
- ('\n[ \t]*;', ';'),
- # brace open
- ('(\w)[ \t]*([^\s]*){([ \t]*\n)', '\\1\\2\n{\n'),
- # brace open backslash
- ('(\w[^\n]*){[ \t]*\\\\\n', '\\1\\\n{\\\n'),
- # brace close
- ("}[ \t]*([^'\n]*\w[^\n\\\]*)\n", '}\n\\1\n'),
- # brace close backslash
- ("}[ \t]*([^'\n]*\w[^\n\\\]*)", '\n}\n\\1'),
- # delete space after `operator'
- #('(\Woperator) (\W)', '\\1\\2'),
- # delete space after case, label
- ('(\W(case|label) ([\w]+)) :', '\\1:'),
- # delete space before comma
- ('[ \t]*,', ','),
- # delete space before semicolon
- ('[ \t]*;', ';'),
- # delete space before eol-backslash
- ('[ \t]*\\\\\n', '\\\n'),
- # delete trailing whitespace
- ('[ \t]*\n', '\n'),
-
- ## Deuglify code that also gets ugly by rules above.
- # delete newline after typedef struct
- ('(typedef struct\s+([\w]*\s){([^}]|{[^}]*})*})\s*\n\s*(\w[\w\d]*;)', '\\1 \\4'),
- # delete spaces around template brackets
- #('(dynamic_cast|template|([A-Z]\w*))[ \t]*<[ \t]*(( *(bool|char|int|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)),?)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\3\\8>'),
- ('(dynamic_cast|template|typedef|\w+::\w+|[A-Z]\w*)[ \t]*<[ \t]*(( *(bool|char|const|int|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*)( *[\*&]?,|[\*&])*)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\2\\6>'),
- ('(\w+::\w+|[A-Z]\w*) < ((\w+::\w+|[A-Z]\w*)<[A-Z]\w*>) >', '\\1<\\2 >'),
- ('((if|while)\s+\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\n;'),
- ('(for\s+\(([^;]*;[^;]*;([^\)]|\([^\)]*\))*)\))\s*;', '\\1\n;'),
- # do {..} while
- ('(}\s*while\s*)(\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\\2;'),
-
- ## Fix code that gets broken by rules above.
- ##('->\s+\*', '->*'),
- # delete space before #define x()
- ('#[ \t]*define (\w*)[ \t]*\(', '#define \\1('),
- # add space in #define x ()
- ('#[ \t]*define (\w*)(\(([^\(\)]|\([^\(\)]*\))*\)\\n)',
- '#define \\1 \\2'),
- # delete space in #include <>
- ('#[ \t]*include[ \t]*<[ \t]*([^ \t>]*)[ \t]*(/?)[ \t]*([^ \t>]*)[ \t]*>',
- '#include <\\1\\2\\3>'),
- # delete backslash before empty line (emacs' indent region is broken)
- ('\\\\\n\n', '\n\n'),
- ],
-
- COMMENT:
- [
- # delete trailing whitespace
- ('[ \t]*\n', '\n'),
- # delete empty first lines
- ('(/\*\n)\n*', '\\1'),
- # delete empty last lines
- ('\n*(\n\*/)', '\\1'),
- ## delete newline after start?
- #('/(\*)\n', '\\1'),
- ## delete newline before end?
- #('\n(\*/)', '\\1'),
- ],
- }
-
-# Recognize special sequences in the input.
-#
-# (?P<name>regex) -- Assign result of REGEX to NAME.
-# *? -- Match non-greedily.
-# (?m) -- Multiline regex: Make ^ and $ match at each line.
-# (?s) -- Make the dot match all characters including newline.
-# (?x) -- Ignore whitespace in patterns.
-no_match = 'a\ba'
-snippet_res = {
- CXX: {
- 'multiline_comment':
- r'''(?sx)
- (?P<match>
- (?P<code>
- [ \t]*/\*.*?\*/))''',
-
- 'singleline_comment':
- r'''(?mx)
- ^.*
- (?P<match>
- (?P<code>
- [ \t]*//([ \t][^\n]*|)\n))''',
-
- 'string':
- r'''(?x)
- (?P<match>
- (?P<code>
- "([^\"\n](\")*)*"))''',
-
- 'char':
- r'''(?x)
- (?P<match>
- (?P<code>
- '([^']+|\')))''',
-
- 'include':
- r'''(?x)
- (?P<match>
- (?P<code>
- "#[ \t]*include[ \t]*<[^>]*>''',
- },
- }
-
-class Chunk:
- def replacement_text (self):
- return ''
-
- def filter_text (self):
- return self.replacement_text ()
-
-class Substring (Chunk):
- def __init__ (self, source, start, end):
- self.source = source
- self.start = start
- self.end = end
-
- def replacement_text (self):
- s = self.source[self.start:self.end]
- if verbose_p:
- sys.stderr.write ('CXX Rules')
- for i in rules[CXX]:
- if verbose_p:
- sys.stderr.write ('.')
- #sys.stderr.write ('\n\n***********\n')
- #sys.stderr.write (i[0])
- #sys.stderr.write ('\n***********\n')
- #sys.stderr.write ('\n=========>>\n')
- #sys.stderr.write (s)
- #sys.stderr.write ('\n<<=========\n')
- s = re.sub (i[0], i[1], s)
- if verbose_p:
- sys.stderr.write ('done\n')
- return s
-
-
-class Snippet (Chunk):
- def __init__ (self, type, match, format):
- self.type = type
- self.match = match
- self.hash = 0
- self.options = []
- self.format = format
-
- def replacement_text (self):
- return self.match.group ('match')
-
- def substring (self, s):
- return self.match.group (s)
-
- def __repr__ (self):
- return `self.__class__` + ' type = ' + self.type
-
-class Multiline_comment (Snippet):
- def __init__ (self, source, match, format):
- self.type = type
- self.match = match
- self.hash = 0
- self.options = []
- self.format = format
-
- def replacement_text (self):
- s = self.match.group ('match')
- if verbose_p:
- sys.stderr.write ('COMMENT Rules')
- for i in rules[COMMENT]:
- if verbose_p:
- sys.stderr.write ('.')
- s = re.sub (i[0], i[1], s)
- return s
-
-snippet_type_to_class = {
- 'multiline_comment': Multiline_comment,
-# 'string': Multiline_comment,
-# 'include': Include_snippet,
-}
-
-def find_toplevel_snippets (s, types):
- if verbose_p:
- sys.stderr.write ('Dissecting')
-
- res = {}
- for i in types:
- res[i] = re.compile (snippet_res[format][i])
-
- snippets = []
- index = 0
- ## found = dict (map (lambda x: (x, None),
- ## types))
- ## urg python2.1
- found = {}
- map (lambda x, f = found: f.setdefault (x, None),
- types)
-
- # We want to search for multiple regexes, without searching
- # the string multiple times for one regex.
- # Hence, we use earlier results to limit the string portion
- # where we search.
- # Since every part of the string is traversed at most once for
- # every type of snippet, this is linear.
-
- while 1:
- if verbose_p:
- sys.stderr.write ('.')
- first = None
- endex = 1 << 30
- for type in types:
- if not found[type] or found[type][0] < index:
- found[type] = None
- m = res[type].search (s[index:endex])
- if not m:
- continue
-
- cl = Snippet
- if snippet_type_to_class.has_key (type):
- cl = snippet_type_to_class[type]
- snip = cl (type, m, format)
- start = index + m.start ('match')
- found[type] = (start, snip)
-
- if found[type] \
- and (not first \
- or found[type][0] < found[first][0]):
- first = type
-
- # FIXME.
-
- # Limiting the search space is a cute
- # idea, but this *requires* to search
- # for possible containing blocks
- # first, at least as long as we do not
- # search for the start of blocks, but
- # always/directly for the entire
- # @block ... @end block.
-
- endex = found[first][0]
-
- if not first:
- snippets.append (Substring (s, index, len (s)))
- break
-
- (start, snip) = found[first]
- snippets.append (Substring (s, index, start))
- snippets.append (snip)
- found[first] = None
- index = start + len (snip.match.group ('match'))
-
- return snippets
-
-def nitpick_file (outdir, file):
- s = open (file).read ()
-
- for i in rules[GLOBAL_CXX]:
- s = re.sub (i[0], i[1], s)
-
- # FIXME: Containing blocks must be first, see
- # find_toplevel_snippets.
- # We leave simple strings be part of the code
- snippet_types = (
- 'multiline_comment',
- 'singleline_comment',
- 'string',
-# 'char',
- )
-
- chunks = find_toplevel_snippets (s, snippet_types)
- #code = filter (lambda x: is_derived_class (x.__class__, Substring),
- # chunks)
-
- t = string.join (map (lambda x: x.filter_text (), chunks), '')
- fixt = file
- if s != t:
- if not outdir:
- os.system ('mv %s %s~' % (file, file))
- else:
- fixt = os.path.join (outdir,
- os.path.basename (file))
- h = open (fixt, "w")
- h.write (t)
- h.close ()
- if s != t or indent_p:
- indent_file (fixt)
-
-def indent_file (file):
- emacs = '''emacs\
- --no-window-system\
- --batch\
- --no-site-file\
- --no-init-file\
- %(file)s\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "cc-mode")
- (c++-mode)
- (indent-region (point-min) (point-max))
- (if (buffer-modified-p (current-buffer))
- (save-buffer)))' ''' % vars ()
- emacsclient = '''emacsclient\
- --socket-name=%(socketdir)s/%(socketname)s\
- --no-wait\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "cc-mode")
- (find-file "%(file)s")
- (c++-mode)
- (indent-region (point-min) (point-max))
- (if (buffer-modified-p (current-buffer))
- (save-buffer)))' ''' \
- % { 'file': file,
- 'socketdir' : socketdir,
- 'socketname' : socketname, }
- if verbose_p:
- sys.stderr.write (emacs)
- sys.stderr.write ('\n')
- os.system (emacs)
-
-
-def usage ():
- sys.stdout.write (r'''
-Usage:
-fixcc [OPTION]... FILE...
-
-Options:
- --help
- --indent reindent, even if no changes
- --verbose
- --test
-
-Typical use with LilyPond:
-
- fixcc $(find flower kpath-guile lily -name '*cc' -o -name '*hh' | grep -v /out)
-
-This script is licensed under the GNU GPL
-''')
-
-def do_options ():
- global indent_p, outdir, verbose_p
- (options, files) = getopt.getopt (sys.argv[1:], '',
- ['help', 'indent', 'outdir=',
- 'test', 'verbose'])
- for (o, a) in options:
- if o == '--help':
- usage ()
- sys.exit (0)
- elif o == '--indent':
- indent_p = 1
- elif o == '--outdir':
- outdir = a
- elif o == '--verbose':
- verbose_p = 1
- elif o == '--test':
- test ()
- sys.exit (0)
- else:
- assert unimplemented
- if not files:
- usage ()
- sys.exit (2)
- return files
-
-
-outdir = 0
-format = CXX
-socketdir = '/tmp/fixcc'
-socketname = 'fixcc%d' % os.getpid ()
-
-def setup_client ():
- #--no-window-system\
- #--batch\
- os.unlink (os.path.join (socketdir, socketname))
- os.mkdir (socketdir, 0700)
- emacs='''emacs\
- --no-site-file\
- --no-init-file\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "server")
- (setq server-socket-dir "%(socketdir)s")
- (setq server-name "%(socketname)s")
- (server-start)
- (while t) (sleep 1000))' ''' \
- % { 'socketdir' : socketdir,
- 'socketname' : socketname, }
-
- if not os.fork ():
- os.system (emacs)
- sys.exit (0)
- while not os.path.exists (os.path.join (socketdir, socketname)):
- time.sleep (1)
-
-def main ():
- #emacsclient should be faster, but this does not work yet
- #setup_client ()
- files = do_options ()
- if outdir and not os.path.isdir (outdir):
- os.makedirs (outdir)
- for i in files:
- sys.stderr.write ('%s...\n' % i)
- nitpick_file (outdir, i)
-
-
-## TODO: make this compilable and check with g++
-TEST = '''
-#include <libio.h>
-#include <map>
-class
-ostream ;
-
-class Foo {
-public: static char* foo ();
-std::map<char*,int>* bar (char, char) { return 0; }
-};
-typedef struct
-{
- Foo **bar;
-} String;
-
-ostream &
-operator << (ostream & os, String d);
-
-typedef struct _t_ligature
-{
- char *succ, *lig;
- struct _t_ligature * next;
-} AFM_Ligature;
-
-typedef std::map < AFM_Ligature const *, int > Bar;
-
- /**
- (c) 1997--2008 Han-Wen Nienhuys <hanwen@cs.uu.nl>
- */
-
-/* ||
-* vv
-* !OK OK
-*/
-/* ||
- vv
- !OK OK
-*/
-char *
-Foo:: foo ()
-{
-int
-i
-;
- char* a= &++ i ;
- a [*++ a] = (char*) foe (*i, &bar) *
- 2;
- int operator double ();
- std::map<char*,int> y =*bar(-*a ,*b);
- Interval_t<T> & operator*= (T r);
- Foo<T>*c;
- int compare (Pqueue_ent < K, T > const& e1, Pqueue_ent < K,T> *e2);
- delete *p;
- if (abs (f)*2 > abs (d) *FUDGE)
- ;
- while (0);
- for (; i<x foo(); foo>bar);
- for (; *p && > y;
- foo > bar)
-;
- do {
- ;;;
- }
- while (foe);
-
- squiggle. extent;
- 1 && * unsmob_moment (lf);
- line_spanner_ = make_spanner ("DynamicLineSpanner", rq ? rq->*self_scm
-(): SCM_EOL);
- case foo: k;
-
- if (0) {a=b;} else {
- c=d;
- }
-
- cookie_io_functions_t Memory_out_stream::functions_ = {
- Memory_out_stream::reader,
- ...
- };
-
- int compare (Array < Pitch> *, Array < Pitch> *);
- original_ = (Grob *) & s;
- Drul_array< Link_array<Grob> > o;
-}
-
- header_.char_info_pos = (6 + header_length) * 4;
- return ly_bool2scm (*ma < * mb);
-
- 1 *::sign(2);
-
- (shift) *-d;
-
- a = 0 ? *x : *y;
-
-a = "foo() 2,2,4";
-{
- if (!span_)
- {
- span_ = make_spanner ("StaffSymbol", SCM_EOL);
- }
-}
-{
- if (!span_)
- {
- span_ = make_spanner (StaffSymbol, SCM_EOL);
- }
-}
-'''
-
-def test ():
- test_file = 'fixcc.cc'
- open (test_file, 'w').write (TEST)
- nitpick_file (outdir, test_file)
- sys.stdout.write (open (test_file).read ())
-
-if __name__ == '__main__':
- main ()
-
+++ /dev/null
-#!@PYTHON@
-import sys
-import getopt
-import re
-import os
-
-(options, files) = \
- getopt.getopt (sys.argv[1:],
- '',
- ['dir='])
-
-
-outdir = ''
-for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--dir':
- outdir = a
- else:
- print o
- raise getopt.error
-
-# Ugh
-for design_size in [11,13,14,16,18,20,23,26]:
- name = 'Emmentaler'
- filename = name.lower ()
- script = '''#!@FONTFORGE@
-
-New();
-
-# Separate Feta versioning?
-# * using 20 as Weight works for gnome-font-select widget: gfs
-
-notice = "";
-notice += "This font is distributed under the GNU General Public License. ";
-notice += "As a special exception, if you create a document which uses ";
-notice += "this font, and embed this font or unaltered portions of this ";
-notice += "font into the document, this font does not by itself cause the ";
-notice += "resulting document to be covered by the GNU General Public License.";;
-
-SetFontNames("%(name)s-%(design_size)d", "%(name)s", "%(name)s %(design_size)d", "%(design_size)d", notice, "@TOPLEVEL_VERSION@");
-
-MergeFonts("feta%(design_size)d.pfb");
-MergeFonts("parmesan%(design_size)d.pfb");
-
-# load nummer/din after setting PUA.
-i = 0;
-while (i < CharCnt())
- Select(i);
-# crashes fontforge, use PUA for now -- jcn
-# SetUnicodeValue(i + 0xF0000, 0);
-/*
-PRIVATE AREA
- In the BMP, the range 0xe000 to 0xf8ff will never be assigned to any
- characters by the standard and is reserved for private usage. For the
- Linux community, this private area has been subdivided further into the
- range 0xe000 to 0xefff which can be used individually by any end-user
- and the Linux zone in the range 0xf000 to 0xf8ff where extensions are
- coordinated among all Linux users. The registry of the characters
- assigned to the Linux zone is currently maintained by H. Peter Anvin
- <Peter.Anvin@linux.org>.
-*/
- SetUnicodeValue(i + 0xE000, 0);
- ++i;
-endloop
-
-
-MergeFonts("feta-alphabet%(design_size)d.pfb");
-MergeKern("feta-alphabet%(design_size)d.tfm");
-
-LoadTableFromFile("LILF", "%(filename)s-%(design_size)d.subfonts");
-LoadTableFromFile("LILC", "feta%(design_size)d.otf-table");
-LoadTableFromFile("LILY", "feta%(design_size)d.otf-gtable");
-
-Generate("%(filename)s-%(design_size)d.otf");
-Generate("%(filename)s-%(design_size)d.svg");
-''' % vars()
-
- basename = '%s-%d' % (filename, design_size)
- path = os.path.join (outdir, basename + '.pe')
- open (path, 'w').write (script)
-
- subfonts = ['feta%(design_size)d',
- 'parmesan%(design_size)d',
- 'feta-alphabet%(design_size)d']
-
- ns = []
- for s in subfonts:
- ns.append ('%s' % (s % vars()))
-
- subfonts_str = ' '.join (ns)
-
- open (os.path.join (outdir, '%(filename)s-%(design_size)d.subfonts' % vars()), 'w').write (subfonts_str)
-
- path = os.path.join (outdir, '%s-%d.dep' % (filename, design_size))
-
- deps = r'''%(filename)s-%(design_size)d.otf: $(outdir)/feta%(design_size)d.pfa \
- $(outdir)/parmesan%(design_size)d.pfa \
- $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-table \
- $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-gtable
-''' % vars()
- open (path, 'w').write (deps)
-
- open (os.path.join (outdir, basename + '.fontname'), 'w').write ("%s-%d" % (name, design_size))
+++ /dev/null
-#!@PYTHON@
-import os
-import sys
-import tempfile
-
-base = os.path.splitext (os.path.split (sys.argv[1])[1])[0]
-input = os.path.abspath (sys.argv[1])
-output = os.path.abspath (sys.argv[2])
-program_name= os.path.split (sys.argv[0])[1]
-
-dir = tempfile.mktemp (program_name)
-os.mkdir (dir, 0777)
-os.chdir(dir)
-
-def system (c):
- print c
- if os.system (c):
- raise 'barf'
-
-outputs = []
-for sz in [48,32,16] :
-
- for depth in [24,8]:
- out = '%(base)s-%(sz)d-%(depth)d.png' % locals()
- system ('convert -depth %(depth)d -sample %(sz)d %(input)s %(out)s' %
- locals ())
- outputs.append (out)
-
-system('icotool --output %s --create %s' % (output, ' '.join (outputs)))
-system('rm -rf %(dir)s' % locals())
-
+++ /dev/null
-#! @BASH@
-# note: dash does not work
-
-pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*2007' '\1 2007--2008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change')
-pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*([^-]*--)(200[0-7])' '\1 \2\062008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change')
+++ /dev/null
-#!@PERL@ -w
-
-# Generate a short man page from --help and --version output.
-# Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software
-# Foundation, Inc.
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2, or (at your option)
-# any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-# Written by Brendan O'Dea <bod@debian.org>
-# Available from ftp://ftp.gnu.org/gnu/help2man/
-
-use 5.005;
-use strict;
-use Getopt::Long;
-use Text::Tabs qw(expand);
-use POSIX qw(strftime setlocale LC_TIME);
-
-my $this_program = 'help2man';
-my $this_version = '1.28';
-my $version_info = <<EOT;
-GNU $this_program $this_version
-
-Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-Written by Brendan O'Dea <bod\@debian.org>
-EOT
-
-my $help_info = <<EOT;
-`$this_program' generates a man page out of `--help' and `--version' output.
-
-Usage: $this_program [OPTIONS]... EXECUTABLE
-
- -n, --name=STRING description for the NAME paragraph
- -s, --section=SECTION section number for manual page (1, 6, 8)
- -m, --manual=TEXT name of manual (User Commands, ...)
- -S, --source=TEXT source of program (FSF, Debian, ...)
- -i, --include=FILE include material from `FILE'
- -I, --opt-include=FILE include material from `FILE' if it exists
- -o, --output=FILE send output to `FILE'
- -p, --info-page=TEXT name of Texinfo manual
- -N, --no-info suppress pointer to Texinfo manual
- --help print this help, then exit
- --version print version number, then exit
-
-EXECUTABLE should accept `--help' and `--version' options although
-alternatives may be specified using:
-
- -h, --help-option=STRING help option string
- -v, --version-option=STRING version option string
-
-Report bugs to <bug-help2man\@gnu.org>.
-EOT
-
-my $section = 1;
-my $manual = '';
-my $source = '';
-my $help_option = '--help';
-my $version_option = '--version';
-my ($opt_name, @opt_include, $opt_output, $opt_info, $opt_no_info);
-
-my %opt_def = (
- 'n|name=s' => \$opt_name,
- 's|section=s' => \$section,
- 'm|manual=s' => \$manual,
- 'S|source=s' => \$source,
- 'i|include=s' => sub { push @opt_include, [ pop, 1 ] },
- 'I|opt-include=s' => sub { push @opt_include, [ pop, 0 ] },
- 'o|output=s' => \$opt_output,
- 'p|info-page=s' => \$opt_info,
- 'N|no-info' => \$opt_no_info,
- 'h|help-option=s' => \$help_option,
- 'v|version-option=s' => \$version_option,
-);
-
-# Parse options.
-Getopt::Long::config('bundling');
-GetOptions (%opt_def,
- help => sub { print $help_info; exit },
- version => sub { print $version_info; exit },
-) or die $help_info;
-
-die $help_info unless @ARGV == 1;
-
-my %include = ();
-my %append = ();
-my @include = (); # retain order given in include file
-
-# Process include file (if given). Format is:
-#
-# [section name]
-# verbatim text
-#
-# or
-#
-# /pattern/
-# verbatim text
-#
-
-while (@opt_include)
-{
- my ($inc, $required) = @{shift @opt_include};
-
- next unless -f $inc or $required;
- die "$this_program: can't open `$inc' ($!)\n"
- unless open INC, $inc;
-
- my $key;
- my $hash = \%include;
-
- while (<INC>)
- {
- # [section]
- if (/^\[([^]]+)\]/)
- {
- $key = uc $1;
- $key =~ s/^\s+//;
- $key =~ s/\s+$//;
- $hash = \%include;
- push @include, $key unless $include{$key};
- next;
- }
-
- # /pattern/
- if (m!^/(.*)/([ims]*)!)
- {
- my $pat = $2 ? "(?$2)$1" : $1;
-
- # Check pattern.
- eval { $key = qr($pat) };
- if ($@)
- {
- $@ =~ s/ at .*? line \d.*//;
- die "$inc:$.:$@";
- }
-
- $hash = \%append;
- next;
- }
-
- # Check for options before the first section--anything else is
- # silently ignored, allowing the first for comments and
- # revision info.
- unless ($key)
- {
- # handle options
- if (/^-/)
- {
- local @ARGV = split;
- GetOptions %opt_def;
- }
-
- next;
- }
-
- $hash->{$key} ||= '';
- $hash->{$key} .= $_;
- }
-
- close INC;
-
- die "$this_program: no valid information found in `$inc'\n"
- unless $key;
-}
-
-# Compress trailing blank lines.
-for my $hash (\(%include, %append))
-{
- for (keys %$hash) { $hash->{$_} =~ s/\n+$/\n/ }
-}
-
-# Turn off localisation of executable's output.
-@ENV{qw(LANGUAGE LANG LC_ALL)} = ('C') x 3;
-
-# Turn off localisation of date (for strftime).
-setlocale LC_TIME, 'C';
-
-# Grab help and version info from executable.
-my ($help_text, $version_text) = map {
- join '', map { s/ +$//; expand $_ } `$ARGV[0] $_ 2>/dev/null`
- or die "$this_program: can't get `$_' info from $ARGV[0]\n"
-} $help_option, $version_option;
-
-my $date = strftime "%B %Y", localtime;
-(my $program = $ARGV[0]) =~ s!.*/!!;
-my $package = $program;
-my $version;
-
-if ($opt_output)
-{
- unlink $opt_output
- or die "$this_program: can't unlink $opt_output ($!)\n"
- if -e $opt_output;
-
- open STDOUT, ">$opt_output"
- or die "$this_program: can't create $opt_output ($!)\n";
-}
-
-# The first line of the --version information is assumed to be in one
-# of the following formats:
-#
-# <version>
-# <program> <version>
-# {GNU,Free} <program> <version>
-# <program> ({GNU,Free} <package>) <version>
-# <program> - {GNU,Free} <package> <version>
-#
-# and seperated from any copyright/author details by a blank line.
-
-($_, $version_text) = split /\n+/, $version_text, 2;
-
-if (/^(\S+) +\(((?:GNU|Free) +[^)]+)\) +(.*)/ or
- /^(\S+) +- *((?:GNU|Free) +\S+) +(.*)/)
-{
- $program = $1;
- $package = $2;
- $version = $3;
-}
-elsif (/^((?:GNU|Free) +)?(\S+) +(.*)/)
-{
- $program = $2;
- $package = $1 ? "$1$2" : $2;
- $version = $3;
-}
-else
-{
- $version = $_;
-}
-
-$program =~ s!.*/!!;
-
-# No info for `info' itself.
-$opt_no_info = 1 if $program eq 'info';
-
-# --name overrides --include contents.
-$include{NAME} = "$program \\- $opt_name\n" if $opt_name;
-
-# Default (useless) NAME paragraph.
-$include{NAME} ||= "$program \\- manual page for $program $version\n";
-
-# Man pages traditionally have the page title in caps.
-my $PROGRAM = uc $program;
-
-# Set default page head/footers
-$source ||= "$program $version";
-unless ($manual)
-{
- for ($section)
- {
- if (/^(1[Mm]|8)/) { $manual = 'System Administration Utilities' }
- elsif (/^6/) { $manual = 'Games' }
- else { $manual = 'User Commands' }
- }
-}
-
-# Extract usage clause(s) [if any] for SYNOPSIS.
-if ($help_text =~ s/^Usage:( +(\S+))(.*)((?:\n(?: {6}\1| *or: +\S).*)*)//m)
-{
- my @syn = $2 . $3;
-
- if ($_ = $4)
- {
- s/^\n//;
- for (split /\n/) { s/^ *(or: +)?//; push @syn, $_ }
- }
-
- my $synopsis = '';
- for (@syn)
- {
- $synopsis .= ".br\n" if $synopsis;
- s!^\S*/!!;
- s/^(\S+) *//;
- $synopsis .= ".B $1\n";
- s/\s+$//;
- s/(([][]|\.\.+)+)/\\fR$1\\fI/g;
- s/^/\\fI/ unless s/^\\fR//;
- $_ .= '\fR';
- s/(\\fI)( *)/$2$1/g;
- s/\\fI\\fR//g;
- s/^\\fR//;
- s/\\fI$//;
- s/^\./\\&./;
-
- $synopsis .= "$_\n";
- }
-
- $include{SYNOPSIS} ||= $synopsis;
-}
-
-# Process text, initial section is DESCRIPTION.
-my $sect = 'DESCRIPTION';
-$_ = "$help_text\n\n$version_text";
-
-# Normalise paragraph breaks.
-s/^\n+//;
-s/\n*$/\n/;
-s/\n\n+/\n\n/g;
-
-# Temporarily exchange leading dots, apostrophes and backslashes for
-# tokens.
-s/^\./\x80/mg;
-s/^'/\x81/mg;
-s/\\/\x82/g;
-
-# Start a new paragraph (if required) for these.
-s/([^\n])\n(Report +bugs|Email +bug +reports +to|Written +by)/$1\n\n$2/g;
-
-sub convert_option;
-
-while (length)
-{
- # Convert some standard paragraph names.
- if (s/^(Options|Examples): *\n//)
- {
- $sect = uc $1;
- next;
- }
-
- # Copyright section
- if (/^Copyright +[(\xa9]/)
- {
- $sect = 'COPYRIGHT';
- $include{$sect} ||= '';
- $include{$sect} .= ".PP\n" if $include{$sect};
-
- my $copy;
- ($copy, $_) = split /\n\n/, $_, 2;
-
- for ($copy)
- {
- # Add back newline
- s/\n*$/\n/;
-
- # Convert iso9959-1 copyright symbol or (c) to nroff
- # character.
- s/^Copyright +(?:\xa9|\([Cc]\))/Copyright \\(co/mg;
-
- # Insert line breaks before additional copyright messages
- # and the disclaimer.
- s/(.)\n(Copyright |This +is +free +software)/$1\n.br\n$2/g;
-
- # Join hyphenated lines.
- s/([A-Za-z])-\n */$1/g;
- }
-
- $include{$sect} .= $copy;
- $_ ||= '';
- next;
- }
-
- # Catch bug report text.
- if (/^(Report +bugs|Email +bug +reports +to) /)
- {
- $sect = 'REPORTING BUGS';
- }
-
- # Author section.
- elsif (/^Written +by/)
- {
- $sect = 'AUTHOR';
- }
-
- # Examples, indicated by an indented leading $, % or > are
- # rendered in a constant width font.
- if (/^( +)([\$\%>] )\S/)
- {
- my $indent = $1;
- my $prefix = $2;
- my $break = '.IP';
- $include{$sect} ||= '';
- while (s/^$indent\Q$prefix\E(\S.*)\n*//)
- {
- $include{$sect} .= "$break\n\\f(CW$prefix$1\\fR\n";
- $break = '.br';
- }
-
- next;
- }
-
- my $matched = '';
- $include{$sect} ||= '';
-
- # Sub-sections have a trailing colon and the second line indented.
- if (s/^(\S.*:) *\n / /)
- {
- $matched .= $& if %append;
- $include{$sect} .= qq(.SS "$1"\n);
- }
-
- my $indent = 0;
- my $content = '';
-
- # Option with description.
- if (s/^( {1,10}([+-]\S.*?))(?:( +(?!-))|\n( {20,}))(\S.*)\n//)
- {
- $matched .= $& if %append;
- $indent = length ($4 || "$1$3");
- $content = ".TP\n\x83$2\n\x83$5\n";
- unless ($4)
- {
- # Indent may be different on second line.
- $indent = length $& if /^ {20,}/;
- }
- }
-
- # Option without description.
- elsif (s/^ {1,10}([+-]\S.*)\n//)
- {
- $matched .= $& if %append;
- $content = ".HP\n\x83$1\n";
- $indent = 80; # not continued
- }
-
- # Indented paragraph with tag.
- elsif (s/^( +(\S.*?) +)(\S.*)\n//)
- {
- $matched .= $& if %append;
- $indent = length $1;
- $content = ".TP\n\x83$2\n\x83$3\n";
- }
-
- # Indented paragraph.
- elsif (s/^( +)(\S.*)\n//)
- {
- $matched .= $& if %append;
- $indent = length $1;
- $content = ".IP\n\x83$2\n";
- }
-
- # Left justified paragraph.
- else
- {
- s/(.*)\n//;
- $matched .= $& if %append;
- $content = ".PP\n" if $include{$sect};
- $content .= "$1\n";
- }
-
- # Append continuations.
- while (s/^ {$indent}(\S.*)\n//)
- {
- $matched .= $& if %append;
- $content .= "\x83$1\n"
- }
-
- # Move to next paragraph.
- s/^\n+//;
-
- for ($content)
- {
- # Leading dot and apostrophe protection.
- s/\x83\./\x80/g;
- s/\x83'/\x81/g;
- s/\x83//g;
-
- # Convert options.
- s/(^| )(-[][\w=-]+)/$1 . convert_option $2/mge;
- }
-
- # Check if matched paragraph contains /pat/.
- if (%append)
- {
- for my $pat (keys %append)
- {
- if ($matched =~ $pat)
- {
- $content .= ".PP\n" unless $append{$pat} =~ /^\./;
- $content .= $append{$pat};
- }
- }
- }
-
- $include{$sect} .= $content;
-}
-
-# Refer to the real documentation.
-unless ($opt_no_info)
-{
- my $info_page = $opt_info || $program;
-
- $sect = 'SEE ALSO';
- $include{$sect} ||= '';
- $include{$sect} .= ".PP\n" if $include{$sect};
- $include{$sect} .= <<EOT;
-The full documentation for
-.B $program
-is maintained as a Texinfo manual. If the
-.B info
-and
-.B $program
-programs are properly installed at your site, the command
-.IP
-.B info $info_page
-.PP
-should give you access to the complete manual.
-EOT
-}
-
-# Output header.
-print <<EOT;
-.\\" DO NOT MODIFY THIS FILE! It was generated by $this_program $this_version.
-.TH $PROGRAM "$section" "$date" "$source" "$manual"
-EOT
-
-# Section ordering.
-my @pre = qw(NAME SYNOPSIS DESCRIPTION OPTIONS EXAMPLES);
-my @post = ('AUTHOR', 'REPORTING BUGS', 'COPYRIGHT', 'SEE ALSO');
-my $filter = join '|', @pre, @post;
-
-# Output content.
-for (@pre, (grep ! /^($filter)$/o, @include), @post)
-{
- if ($include{$_})
- {
- my $quote = /\W/ ? '"' : '';
- print ".SH $quote$_$quote\n";
-
- for ($include{$_})
- {
- # Replace leading dot, apostrophe and backslash tokens.
- s/\x80/\\&./g;
- s/\x81/\\&'/g;
- s/\x82/\\e/g;
- print;
- }
- }
-}
-
-exit;
-
-# Convert option dashes to \- to stop nroff from hyphenating 'em, and
-# embolden. Option arguments get italicised.
-sub convert_option
-{
- local $_ = '\fB' . shift;
-
- s/-/\\-/g;
- unless (s/\[=(.*)\]$/\\fR[=\\fI$1\\fR]/)
- {
- s/=(.)/\\fR=\\fI$1/;
- s/ (.)/ \\fI$1/;
- $_ .= '\fR';
- }
-
- $_;
-}
+++ /dev/null
-#!@PYTHON@
-# html-gettext.py
-
-# USAGE: html-gettext.py [-o OUTDIR] LANG FILES
-#
-# -o OUTDIR specifies that output files should be written in OUTDIR
-# rather than be overwritten
-#
-
-import sys
-import re
-import os
-import getopt
-
-import langdefs
-
-optlist, args = getopt.getopt(sys.argv[1:],'o:')
-lang = args[0]
-files = args [1:]
-
-outdir = '.'
-for x in optlist:
- if x[0] == '-o':
- outdir = x[1]
-
-double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
-my_gettext = langdefs.translation[lang]
-
-html_codes = ((' -- ', ' – '),
- (' --- ', ' — '),
- ("'", '’'))
-texi_html_conversion = {
- 'command': {
- 'html2texi':
- (re.compile (r'(?:<samp><span class="command">|<code>)(.*?)(?:</span></samp>|</code>)'),
- r'@command{\1}'),
- 'texi2html':
- (re.compile (r'@command{(.*?)}'),
- r'<code>\1</code>'),
- },
- 'code': {
- 'html2texi':
- (re.compile (r'<code>(.*?)</code>'),
- r'@code{\1}'),
- 'texi2html':
- (re.compile (r'@code{(.*?)}'),
- r'<code>\1</code>'),
- },
- }
-
-whitespaces = re.compile (r'\s+')
-
-
-def _ (s):
- if not s:
- return ''
- str = whitespaces.sub (' ', s)
- for c in html_codes:
- str = str.replace (c[1], c[0])
- for command in texi_html_conversion:
- d = texi_html_conversion[command]
- str = d['html2texi'][0].sub (d['html2texi'][1], str)
- str = my_gettext (str)
- str = d['texi2html'][0].sub (d['texi2html'][1], str)
- for c in html_codes:
- str = str.replace (c[0], c[1])
- return str
-
-link_re = re.compile (r'<link rel="(up|prev|next)" (.*?) title="([^"]*?)">')
-
-def link_gettext (m):
- return '<link rel="' + m.group (1) + '" ' + m.group (2) \
- + ' title="' + _ (m.group (3)) + '">'
-
-makeinfo_title_re = re.compile (r'<title>([^<]*?) - ([^<]*?)</title>')
-
-def makeinfo_title_gettext (m):
- return '<title>' + _ (m.group (1)) + ' - ' + m.group (2) + '</title>'
-
-texi2html_title_re = re.compile (r'<title>(.+): ([A-Z\d.]+ |)(.+?)</title>')
-
-def texi2html_title_gettext (m):
- return '<title>' + _ (m.group (1)) + double_punct_char_separator + ': ' \
- + m.group (2) + _ (m.group (3)) + '</title>'
-
-a_href_re = re.compile ('(?s)<a (?P<attributes>[^>]*?href="[\\w.#-_]+"[^>]*?>)(?P<code><code>)?\
-(?P<appendix>Appendix )?(?P<leading>[A-Z0-9.]+ | (?:<){1,2} | [^>:]+?: | |)\
-(?P<name>(?:<samp><span class="command">|</?code>|</span>|[^>])+?)(?P<end_code>(?(code)</code>|))\
-(?P<trailing> (?:>){1,2} | |)</a>:?')
-
-def a_href_gettext (m):
- s = ''
- if m.group(0)[-1] == ':':
- s = double_punct_char_separator + ':'
- t = ''
- if m.group ('appendix'):
- t = _ (m.group ('appendix'))
- return '<a ' + m.group ('attributes') + (m.group ('code') or '') + \
- t + m.group ('leading') + _ (m.group ('name')) + \
- m.group ('end_code') + m.group ('trailing') + '</a>' + s
-
-h_re = re.compile (r'<h(\d)( class="\w+"|)>\s*(Appendix |)([A-Z\d.]+ |)(.+?)\s*</h\1>')
-
-def h_gettext (m):
- if m.group (3):
- s = _ (m.group (3))
- else:
- s= ''
- return '<h' + m.group (1) + m.group (2) + '>' + s +\
- m.group (4) + _ (m.group (5)) + '</h' + m.group (1) + '>'
-
-for filename in files:
- f = open (filename, 'r')
- page = f.read ()
- f.close ()
- page = link_re.sub (link_gettext, page)
- page = makeinfo_title_re.sub (makeinfo_title_gettext, page)
- page = texi2html_title_re.sub (texi2html_title_gettext, page)
- page = a_href_re.sub (a_href_gettext, page)
- page = h_re.sub (h_gettext, page)
- for w in ('Next:', 'Previous:', 'Up:'):
- page = page.replace (w, _ (w))
- page = langdefs.LANGDICT[lang].html_filter (page)
- f = open (os.path.join (outdir, filename), 'w')
- f.write (page)
- f.close ()
+++ /dev/null
-#!@BASH@
-
-name=install-info-html
-version=1.0
-
-all=
-index_dir=.
-
-#
-# debugging
-#
-debug_echo=:
-
-
-#
-# print usage
-#
-help ()
-{
- cat << EOF
-$name $version
-Install HTML info document.
-
-Usage: $name [OPTIONS]... [DOCUMENT-DIR]...
-
-Options:
- -a, --all assume all subdirectories of index to be DOCUMENT-DIRs
- -d, --dir=DIR set index directory to DIR (default=.)
- -D, --debug print debugging info
- -h, --help show this help text
- -v, --version show version
-EOF
-}
-
-
-cleanup ()
-{
- $debug_echo "cleaning ($?)..."
-}
-
-trap cleanup 0 9 15
-
-#
-# Find command line options and switches
-#
-
-# "x:" x takes argument
-#
-options="adhvW:"
-#
-# ugh, "\-" is a hack to support long options
-# must be in double quotes for bash-2.0
-
-while getopts "\-:$options" O
-do
- $debug_echo "O: \`$O'"
- $debug_echo "arg: \`$OPTARG'"
- case $O in
- a)
- all=yes
- ;;
- D)
- [ "$debug_echo" = "echo" ] && set -x
- debug_echo=echo
- ;;
- h)
- help;
- exit 0
- ;;
- v)
- echo $name $version
- exit 0
- ;;
- d)
- index_dir=$OPTARG
- ;;
- # a long option!
- -)
- case "$OPTARG" in
- a*|-a*)
- all=yes
- ;;
- de*|-de*)
- [ "$debug_echo" = "echo" ] && set -x
- debug_echo=echo
- ;;
- h*|-h*)
- help;
- exit 0
- ;;
- di*|-di*)
- index_dir="`expr \"$OPTARG\" ':' '[^=]*=\(.*\)'`"
- ;;
- version|-version)
- echo $name $version
- exit 0
- ;;
- *|-*)
- echo "$0: invalid option -- \"$OPTARG\""
- help;
- exit -1
- ;;
- esac
- esac
-done
-shift `expr $OPTIND - 1`
-
-#
-# Input file name
-#
-if [ -z "$all" -a -z "$1" ]; then
- help
- echo "$name: No HTML documents given"
- exit 2
-fi
-
-if [ -n "$all" -a -n "$1" ]; then
- echo "$name: --all specified, ignoring DIRECTORY-DIRs"
-fi
-
-if [ -n "$all" ]; then
- document_dirs=`/bin/ls -d1 $index_dir`
-else
- document_dirs=$*
-fi
-
-index_file=$index_dir/index.html
-rm -f $index_file
-echo -n "$name: Writing index: $index_file..."
-
-# head
-cat >> $index_file <<EOF
-<html>
-<title>Info documentation index</title>
-<body>
-<h1>Info documentation index</h1>
-<p>
-This is the directory file \`index.html' a.k.a. \`DIR', which contains the
-topmost node of the HTML Info hierarchy.
-</p>
-<ul>
-EOF
-
-#list
-for i in $document_dirs; do
- cat <<EOF
-<li> <a href="$i/index.html">$i</a> (<a href="$i.html">$i as one big page</a>)</li>
-EOF
-done >> $index_file
-
-# foot
-cat >> $index_file <<EOF
-</ul>
-</body>
-</html>
-EOF
-echo
+++ /dev/null
-#!@PYTHON@
-
-# Created 01 September 2003 by Heikki Junes.
-# Rewritten by John Mandereau
-
-# Generates lilypond-words.el for (X)Emacs and lilypond-words[.vim] for Vim.
-
-import re
-import sys
-import os
-import getopt
-
-keywords = []
-reserved_words = []
-note_names = []
-
-# keywords not otherwise found
-keywords += ['include', 'maininput', 'version']
-
-# the main keywords
-s = open ('lily/lily-lexer.cc', 'r').read ()
-keywords += [w for w in re.findall (r"\s*{\"(.+)\",\s*.*},\s*\n", s)]
-
-s = open ('scm/markup.scm', 'r').read ()
-keywords += [w for w in re.findall (r"(?m)^\s*\(cons\s*([a-z-]+)-markup", s)]
-
-# identifiers and keywords
-for name in ['ly/chord-modifiers-init.ly',
- 'ly/dynamic-scripts-init.ly',
- 'ly/engraver-init.ly',
- 'ly/grace-init.ly',
- 'ly/gregorian.ly',
- 'ly/music-functions-init.ly',
- 'ly/performer-init.ly',
- 'ly/property-init.ly',
- 'ly/scale-definitions-init.ly',
- 'ly/script-init.ly',
- 'ly/spanners-init.ly',
- 'ly/declarations-init.ly',
- 'ly/params-init.ly']:
- s = open (name, 'r').read ()
- keywords += [w for w in re.findall (r"(?m)^\s*\"?([a-zA-Z]+)\"?\s*=", s)]
-
-# note names
-for name in ['ly/catalan.ly',
- 'ly/deutsch.ly',
- 'ly/drumpitch-init.ly',
- 'ly/english.ly',
- 'ly/espanol.ly',
- 'ly/italiano.ly',
- 'ly/nederlands.ly',
- 'ly/norsk.ly',
- 'ly/portugues.ly',
- 'ly/suomi.ly',
- 'ly/svenska.ly',
- 'ly/vlaams.ly']:
- s = open (name, 'r').read ()
- note_names += [n for n in re.findall (r"(?m)^\s*\(([a-z]+)[^l]+ly:make-pitch", s)]
-
-# reserved words
-for name in ['ly/engraver-init.ly',
- 'ly/performer-init.ly']:
- s = open (name, 'r').read ()
- for pattern in [r"(?m)^\s*.consists\s+\"([a-zA-Z_]+)\"",
- r"[\\]name\s+[\"]?([a-zA-Z_]+)[\"]?",
- r"\s+([a-zA-Z_]+)\s*\\(?:set|override)"]:
- reserved_words += [w for w in re.findall (pattern, s)]
-
-keywords = list (set (keywords))
-keywords.sort (reverse=True)
-
-reserved_words = list (set (reserved_words))
-reserved_words.sort (reverse=True)
-
-note_names = list (set (note_names))
-note_names.sort (reverse=True)
-
-
-# output
-outdir = ''
-out_words = False
-out_el = False
-out_vim = False
-
-options = getopt.getopt (sys.argv[1:],
- '', ['words', 'el', 'vim', 'dir='])[0]
-
-for (o, a) in options:
- if o == '--words':
- out_words = True
- elif o == '--el':
- out_el = True
- elif o == '--vim':
- out_vim = True
- elif o == '--dir':
- outdir = a
-
-if out_words or out_el:
- outstring = ''.join (['\\\\' + w + '\n' for w in keywords])
- outstring += ''.join ([w + '\n' for w in reserved_words])
- outstring += ''.join ([w + '\n' for w in note_names])
-
-if out_words:
- f = open (os.path.join (outdir, 'lilypond-words'), 'w')
- f.write (outstring)
-
-if out_el:
- f = open (os.path.join (outdir, 'lilypond-words.el'), 'w')
- f.write (outstring)
-
- # the menu in lilypond-mode.el
- # for easier typing of this list, replace '/' with '\' below
- # when writing to file
- elisp_menu = ['/( - _ /) -',
- '/[ - _ /] -',
- '< - _ > -',
- '<< - _ >> -',
- '///( - _ ///) -',
- '///[ - _ ///] -',
- '///< - _ ///! -',
- '///> - _ ///! -',
- '//center - / << _ >> -',
- '//column - / << _ >> -',
- '//context/ Staff/ = - % { _ } -',
- '//context/ Voice/ = - % { _ } -',
- '//markup - { _ } -',
- '//notes - { _ } -',
- '//relative - % { _ } -',
- '//score - { //n /? //simultaneous { //n _ //n } /! //n //paper { } //n /? //midi { } //n /! } //n -',
- '//simultaneous - { _ } -',
- '//sustainDown - _ //sustainUp -',
- '//times - % { _ } -',
- '//transpose - % { _ } -',
- '']
- f.write ('\n'.join ([line.replace ('/', '\\') for line in elisp_menu]))
-
-if out_vim:
- f = open (os.path.join (outdir, 'lilypond-words.vim'), 'w')
- f.write ('syn match lilyKeyword \"[-_^]\\?\\\\\\(')
- f.write (''.join ([w + '\\|' for w in keywords]))
- f.write ('n\\)\\(\\A\\|\\n\\)\"me=e-1\n')
-
- f.write ('syn match lilyReservedWord \"\\(\\A\\|\\n\\)\\(')
- f.write (''.join ([w + '\\|' for w in reserved_words]))
- f.write ('Score\\)\\(\\A\\|\\n\\)\"ms=s+1,me=e-1\n')
-
- f.write ('syn match lilyNote \"\\<\\(\\(\\(')
- f.write (''.join ([w + '\\|' for w in note_names]))
- f.write ('a\\)\\([,\']\\)\\{,4}\\([?!]\\)\\?\\)\\|s\\|r\\)\\(\\(128\\|64\\|32\\|16\\|8\\|4\\|2\\|1\\|\\\\breve\\|\\\\longa\\|\\\\maxima\\)[.]\\{,8}\\)\\?\\(\\A\\|\\n\\)\"me=e-1\n')
+++ /dev/null
-#!@PYTHON@
-
-
-'''
-TODO:
-
- * Add @nodes, split at sections?
-
-'''
-
-
-import sys
-import os
-import getopt
-import re
-
-program_name = 'lys-to-tely'
-
-include_snippets = '@lysnippets'
-fragment_options = 'printfilename,texidoc'
-help_text = r"""Usage: %(program_name)s [OPTIONS]... LY-FILE...
-Construct tely doc from LY-FILEs.
-
-Options:
- -h, --help print this help
- -f, --fragment-options=OPTIONS use OPTIONS as lilypond-book fragment
- options
- -o, --output=NAME write tely doc to NAME
- -t, --title=TITLE set tely doc title TITLE
- --template=TEMPLATE use TEMPLATE as Texinfo template file,
- instead of standard template; TEMPLATE should contain a command
- '%(include_snippets)s' to tell where to insert LY-FILEs. When this
- option is used, NAME and TITLE are ignored.
-"""
-
-def help (text):
- sys.stdout.write ( text)
- sys.exit (0)
-
-(options, files) = getopt.getopt (sys.argv[1:], 'f:hn:t:',
- ['fragment-options=', 'help', 'name=', 'title=', 'template='])
-
-name = "ly-doc"
-title = "Ly Doc"
-template = '''\input texinfo
-@setfilename %%(name)s.info
-@settitle %%(title)s
-
-@documentencoding utf-8
-@iftex
-@afourpaper
-@end iftex
-
-@finalout @c we do not want black boxes.
-
-@c fool ls-latex
-@ignore
-@author Han-Wen Nienhuys and Jan Nieuwenhuizen
-@title %%(title)s
-@end ignore
-
-@node Top, , , (dir)
-@top %%(title)s
-
-%s
-
-@bye
-''' % include_snippets
-
-for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '-h' or o == '--help':
- # We can't use vars () inside a function, as that only contains all
- # local variables and none of the global variables! Thus we have to
- # generate the help text here and pass it to the function...
- help (help_text % vars ())
- elif o == '-n' or o == '--name':
- name = a
- elif o == '-t' or o == '--title':
- title = a
- elif o == '-f' or o == '--fragment-options':
- fragment_options = a
- elif o == '--template':
- template = open (a, 'r').read ()
- else:
- raise Exception ('unknown option: ' + o)
-
-texi_file_re = re.compile ('.*\.i?te(ly|xi)$')
-
-def name2line (n):
- if texi_file_re.match (n):
- # We have a texi include file, simply include it:
- s = r"@include %s" % os.path.basename (n)
- else:
- # Assume it's a lilypond file -> create image etc.
- s = r"""
-@ifhtml
-@html
-<a name="%s"></a>
-@end html
-@end ifhtml
-
-@lilypondfile[%s]{%s}
-""" % (os.path.basename (n), fragment_options, n)
- return s
-
-if files:
- dir = os.path.dirname (name) or "."
-# don't strip .tely extension, input/lsr uses .itely
- name = os.path.basename (name)
- template = template % vars ()
-
- s = "\n".join (map (name2line, files))
- s = template.replace (include_snippets, s, 1)
- f = "%s/%s" % (dir, name)
- sys.stderr.write ("%s: writing %s..." % (program_name, f))
- h = open (f, "w")
- h.write (s)
- h.close ()
- sys.stderr.write ('\n')
-else:
- # not Unix philosophy, but hey, at least we notice when
- # we don't distribute any .ly files.
- sys.stderr.write ("No files specified. Doing nothing")
+++ /dev/null
-#!/usr/bin/env python
-
-import sys
-import os
-import glob
-import re
-
-USAGE = ''' Usage: makelsr.py LSR_SNIPPETS_DIR
-This script must be run from top of the source tree;
-it updates snippets input/lsr with snippets in input/new or LSR_SNIPPETS_DIR.
-'''
-
-LY_HEADER_LSR = '''%% Do not edit this file; it is auto-generated from LSR http://lsr.dsi.unimi.it
-%% This file is in the public domain.
-'''
-
-LY_HEADER_NEW = '''%% Do not edit this file; it is auto-generated from input/new
-%% This file is in the public domain.
-'''
-
-DEST = os.path.join ('input', 'lsr')
-NEW_LYS = os.path.join ('input', 'new')
-TEXIDOCS = os.path.join ('input', 'texidocs')
-
-TAGS = []
-# NR 1
-TAGS.extend (['pitches', 'rhythms', 'expressive-marks',
-'repeats', 'simultaneous-notes', 'staff-notation',
-'editorial-annotations', 'text'])
-# NR 2
-TAGS.extend (['vocal-music', 'chords', 'keyboards',
-'percussion', 'fretted-strings', 'unfretted-strings',
-'ancient-notation', 'winds', 'world-music'
-])
-
-# other
-TAGS.extend (['contexts-and-engravers', 'tweaks-and-overrides',
-'paper-and-layout', 'breaks', 'spacing', 'midi', 'titles', 'template'])
-
-def exit_with_usage (n=0):
- sys.stderr.write (USAGE)
- sys.exit (n)
-
-try:
- in_dir = sys.argv[1]
-except:
- exit_with_usage (2)
-
-if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)):
- exit_with_usage (3)
-
-unsafe = []
-unconverted = []
-notags_files = []
-
-# mark the section that will be printed verbatim by lilypond-book
-end_header_re = re.compile ('(\\header {.+?doctitle = ".+?})\n', re.M | re.S)
-
-def mark_verbatim_section (ly_code):
- return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1)
-
-# '% LSR' comments are to be stripped
-lsr_comment_re = re.compile (r'\s*%+\s*LSR.*')
-
-begin_header_re = re.compile (r'\\header\s*{', re.M)
-
-# add tags to ly files from LSR
-def add_tags (ly_code, tags):
- return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n', ly_code, 1)
-
-def copy_ly (srcdir, name, tags):
- global unsafe
- global unconverted
- dest = os.path.join (DEST, name)
- tags = ', '.join (tags)
- s = open (os.path.join (srcdir, name)).read ()
-
- texidoc_translations_path = os.path.join (TEXIDOCS,
- os.path.splitext (name)[0] + '.texidoc')
- if os.path.exists (texidoc_translations_path):
- texidoc_translations = open (texidoc_translations_path).read ()
- # Since we want to insert the translations verbatim using a
- # regexp, \\ is understood as ONE escaped backslash. So we have
- # to escape those backslashes once more...
- texidoc_translations = texidoc_translations.replace ('\\', '\\\\')
- s = begin_header_re.sub ('\\g<0>\n' + texidoc_translations, s, 1)
-
- if in_dir in srcdir:
- s = LY_HEADER_LSR + add_tags (s, tags)
- else:
- s = LY_HEADER_NEW + s
-
- s = mark_verbatim_section (s)
- s = lsr_comment_re.sub ('', s)
- open (dest, 'w').write (s)
-
- e = os.system ("convert-ly -e '%s'" % dest)
- if e:
- unconverted.append (dest)
- if os.path.exists (dest + '~'):
- os.remove (dest + '~')
- # -V seems to make unsafe snippets fail nicer/sooner
- e = os.system ("lilypond -V -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" % dest)
- if e:
- unsafe.append (dest)
-
-def read_source_with_dirs (src):
- s = {}
- l = {}
- for tag in TAGS:
- srcdir = os.path.join (src, tag)
- l[tag] = set (map (os.path.basename, glob.glob (os.path.join (srcdir, '*.ly'))))
- for f in l[tag]:
- if f in s:
- s[f][1].append (tag)
- else:
- s[f] = (srcdir, [tag])
- return s, l
-
-
-tags_re = re.compile ('lsrtags\\s*=\\s*"(.+?)"')
-
-def read_source (src):
- s = {}
- l = dict ([(tag, set()) for tag in TAGS])
- for f in glob.glob (os.path.join (src, '*.ly')):
- basename = os.path.basename (f)
- m = tags_re.search (open (f, 'r').read ())
- if m:
- file_tags = [tag.strip() for tag in m.group (1). split(',')]
- s[basename] = (src, file_tags)
- [l[tag].add (basename) for tag in file_tags if tag in TAGS]
- else:
- notags_files.append (f)
- return s, l
-
-
-def dump_file_list (file, list):
- f = open (file, 'w')
- f.write ('\n'.join (list) + '\n')
-
-## clean out existing lys and generated files
-map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
- glob.glob (os.path.join (DEST, '*.snippet-list')))
-
-# read LSR source where tags are defined by subdirs
-snippets, tag_lists = read_source_with_dirs (in_dir)
-# read input/new where tags are directly
-s, l = read_source (NEW_LYS)
-snippets.update (s)
-for t in TAGS:
- tag_lists[t].update (l[t])
-
-for (name, (srcdir, tags)) in snippets.items ():
- copy_ly (srcdir, name, tags)
-
-for (tag, file_set) in tag_lists.items ():
- dump_file_list (os.path.join (DEST, tag + '.snippet-list'), sorted(file_set))
-
-if unconverted:
- sys.stderr.write ('These files could not be converted successfully by convert-ly:\n')
- sys.stderr.write ('\n'.join (unconverted) + '\n\n')
-
-if notags_files:
- sys.stderr.write ('No tags could be found in these files:\n')
- sys.stderr.write ('\n'.join (notags_files) + '\n\n')
-
-dump_file_list ('lsr-unsafe.txt', unsafe)
-sys.stderr.write ('''
-
-Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY!
- git add input/lsr/*.ly
- xargs git-diff HEAD < lsr-unsafe.txt
-
-''')
-
+++ /dev/null
-#!/usr/bin/python
-
-# This module is imported by check_texi_refs.py
-
-references_dict = {
- 'lilypond': 'ruser',
- 'lilypond-learning': 'rlearning',
- 'lilypond-program': 'rprogram',
- 'lilypond-snippets': 'rlsr',
- 'music-glossary': 'rglos',
- 'lilypond-internals': 'rinternals' }
+++ /dev/null
-#!@PYTHON@
-# mass-link.py
-
-# USAGE: mass-link.py [--prepend-suffix SUFFIX] symbolic | hard SOURCEDIR DESTDIR FILES
-#
-# create hard or symbolic links to SOURCEDIR/FILES in DESTDIR
-#
-# If --prepend-suffix is specified, link to foo.bar will be called fooSUFFIX.bar.
-# Shell wildcards expansion is performed on FILES.
-
-import sys
-import os
-import glob
-import getopt
-
-print "mass-link.py"
-
-optlist, args = getopt.getopt (sys.argv[1:], '', ['prepend-suffix='])
-link_type, source_dir, dest_dir = args[0:3]
-files = args[3:]
-
-source_dir = os.path.normpath (source_dir)
-dest_dir = os.path.normpath (dest_dir)
-
-prepended_suffix = ''
-for x in optlist:
- if x[0] == '--prepend-suffix':
- prepended_suffix = x[1]
-
-if prepended_suffix:
- def insert_suffix (p):
- l = p.split ('.')
- if len (l) >= 2:
- l[-2] += prepended_suffix
- return '.'.join (l)
- return p + prepended_suffix
-else:
- insert_suffix = lambda p: p
-
-if link_type == 'symbolic':
- link = os.symlink
-elif link_type == 'hard':
- link = os.link
-else:
- sys.stderr.write(sys.argv[0] + ': ' + link_type + ": wrong argument, expected 'symbolic' or 'hard'\n")
- sys.exit (1)
-
-sourcefiles = []
-for pattern in files:
- sourcefiles += (glob.glob (os.path.join (source_dir, pattern)))
-
-def relative_path (f):
- if source_dir == '.':
- return f
- return f[len (source_dir) + 1:]
-
-destfiles = [os.path.join (dest_dir, insert_suffix (relative_path (f))) for f in sourcefiles]
-
-destdirs = set ([os.path.dirname (dest) for dest in destfiles])
-[os.makedirs (d) for d in destdirs if not os.path.exists (d)]
-
-def force_link (src,dest):
- if os.path.exists (dest):
- os.system ('rm -f ' + dest)
- link (src, dest)
-
-map (force_link, sourcefiles, destfiles)
+++ /dev/null
-#!@PYTHON@
-
-# mf-to-table.py -- convert spacing info in MF logs .
-#
-# source file of the GNU LilyPond music typesetter
-#
-# (c) 1997--2008 Han-Wen Nienhuys <hanwen@cs.uu.nl>
-
-import os
-import sys
-import getopt
-import re
-import time
-
-def read_log_file (fn):
- str = open (fn).read ()
- str = re.sub ('\n', '', str)
- str = re.sub ('[\t ]+', ' ', str)
-
- deps = []
- autolines = []
- def include_func (match, d = deps):
- d.append (match.group (1))
- return ''
-
- def auto_func (match, a = autolines):
- a.append (match.group (1))
- return ''
-
- str = re.sub ('\\(([/a-z.A-Z_0-9-]+\\.mf)', include_func, str)
- str = re.sub ('@{(.*?)@}', auto_func, str)
-
- return (autolines, deps)
-
-
-class Char_metric:
- def __init__ (self):
- pass
-
-font_family = 'feta'
-
-def parse_logfile (fn):
- autolines, deps = read_log_file (fn)
- charmetrics = []
-
- global_info = {
- 'filename' : os.path.splitext (os.path.basename (fn))[0]
- }
- group = ''
-
- for l in autolines:
- tags = l.split ('@:')
- if tags[0] == 'group':
- group = tags[1]
- elif tags[0] == 'puorg':
- group = ''
- elif tags[0] == 'char':
- name = tags[9]
-
- if group:
- name = group + '.' + name
- m = {
- 'description': tags[1],
- 'name': name,
- 'code': int (tags[2]),
- 'breapth': float (tags[3]),
- 'width': float (tags[4]),
- 'depth': float (tags[5]),
- 'height': float (tags[6]),
- 'wx': float (tags[7]),
- 'wy': float (tags[8]),
- }
- charmetrics.append (m)
- elif tags[0] == 'font':
- global font_family
- font_family = (tags[3])
- # To omit 'GNU' (foundry) from font name proper:
- # name = tags[2:]
- #urg
- if 0: # testing
- tags.append ('Regular')
-
- encoding = re.sub (' ','-', tags[5])
- tags = tags[:-1]
- name = tags[1:]
- global_info['design_size'] = float (tags[4])
- global_info['FontName'] = '-'.join (name)
- global_info['FullName'] = ' '.join (name)
- global_info['FamilyName'] = '-'.join (name[1:-1])
- if 1:
- global_info['Weight'] = tags[4]
- else: # testing
- global_info['Weight'] = tags[-1]
-
- global_info['FontBBox'] = '0 0 1000 1000'
- global_info['Ascender'] = '0'
- global_info['Descender'] = '0'
- global_info['EncodingScheme'] = encoding
-
- elif tags[0] == 'parameter':
- global_info[tags[1]] = tags[2];
-
- return (global_info, charmetrics, deps)
-
-
-
-def character_lisp_table (global_info, charmetrics):
-
- def conv_char_metric (charmetric):
- f = 1.0
- s = """(%s .
-((bbox . (%f %f %f %f))
-(subfont . "%s")
-(subfont-index . %d)
-(attachment . (%f . %f))))
-""" %(charmetric['name'],
- -charmetric['breapth'] * f,
- -charmetric['depth'] * f,
- charmetric['width'] * f,
- charmetric['height'] * f,
- global_info['filename'],
- charmetric['code'],
- charmetric['wx'],
- charmetric['wy'])
-
- return s
-
- s = ''
- for c in charmetrics:
- s += conv_char_metric (c)
-
- return s
-
-
-def global_lisp_table (global_info):
- str = ''
-
- keys = ['staffsize', 'stafflinethickness', 'staff_space',
- 'linethickness', 'black_notehead_width', 'ledgerlinethickness',
- 'design_size',
- 'blot_diameter'
- ]
- for k in keys:
- if global_info.has_key (k):
- str = str + "(%s . %s)\n" % (k,global_info[k])
-
- return str
-
-
-def ps_encoding (name, global_info, charmetrics):
- encs = ['.notdef'] * 256
- for m in charmetrics:
- encs[m['code']] = m['name']
-
-
- s = ('/%s [\n' % name)
- for m in range (0, 256):
- s += (' /%s %% %d\n' % (encs[m], m))
- s += ('] def\n')
- return s
-
-def get_deps (deps, targets):
- s = ''
- for t in targets:
- t = re.sub ( '^\\./', '', t)
- s += ('%s '% t)
- s += (": ")
- for d in deps:
- s += ('%s ' % d)
- s += ('\n')
- return s
-
-def help ():
- sys.stdout.write(r"""Usage: mf-to-table [OPTIONS] LOGFILEs
-
-Generate feta metrics table from preparated feta log.
-
-Options:
- -d, --dep=FILE print dependency info to FILE
- -h, --help print this help
- -l, --ly=FILE name output table
- -o, --outdir=DIR prefix for dependency info
- -p, --package=DIR specify package
-
- """)
- sys.exit (0)
-
-
-(options, files) = \
- getopt.getopt (sys.argv[1:],
- 'a:d:ho:p:t:',
- ['enc=', 'outdir=', 'dep=', 'lisp=',
- 'global-lisp=',
- 'debug', 'help', 'package='])
-
-global_lisp_nm = ''
-char_lisp_nm = ''
-enc_nm = ''
-depfile_nm = ''
-lyfile_nm = ''
-outdir_prefix = '.'
-
-for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--dep' or o == '-d':
- depfile_nm = a
- elif o == '--outdir' or o == '-o':
- outdir_prefix = a
- elif o == '--lisp':
- char_lisp_nm = a
- elif o == '--global-lisp':
- global_lisp_nm = a
- elif o == '--enc':
- enc_nm = a
- elif o== '--help' or o == '-h':
- help()
- elif o == '--debug':
- debug_b = 1
- else:
- print o
- raise getopt.error
-
-base = os.path.splitext (lyfile_nm)[0]
-
-for filenm in files:
- (g, m, deps) = parse_logfile (filenm)
-
- enc_name = 'FetaEncoding'
- if re.search ('parmesan', filenm):
- enc_name = 'ParmesanEncoding'
- elif re.search ('feta-brace', filenm):
- enc_name = 'FetaBraceEncoding'
- elif re.search ('feta-alphabet', filenm):
- enc_name = 'FetaAlphabetEncoding';
-
- open (enc_nm, 'w').write (ps_encoding (enc_name, g, m))
- open (char_lisp_nm, 'w').write (character_lisp_table (g, m))
- open (global_lisp_nm, 'w').write (global_lisp_table (g))
- if depfile_nm:
- open (depfile_nm, 'wb').write (get_deps (deps,
- [base + '.log', base + '.dvi', base + '.pfa',
- depfile_nm,
- base + '.pfb']))
+++ /dev/null
-#! /usr/bin/perl
-
-##################################################
-# Convert stylized Metafont to PostScript Type 1 #
-# By Scott Pakin <scott+mf@pakin.org> #
-##################################################
-
-########################################################################
-# mf2pt1 #
-# Copyright (C) 2008 Scott Pakin #
-# #
-# This program may be distributed and/or modified under the conditions #
-# of the LaTeX Project Public License, either version 1.3c of this #
-# license or (at your option) any later version. #
-# #
-# The latest version of this license is in: #
-# #
-# http://www.latex-project.org/lppl.txt #
-# #
-# and version 1.3c or later is part of all distributions of LaTeX #
-# version 2006/05/20 or later. #
-########################################################################
-
-our $VERSION = "2.4.4"; # mf2pt1 version number
-require 5.6.1; # I haven't tested mf2pt1 with older Perl versions
-
-use File::Basename;
-use File::Spec;
-use Getopt::Long;
-use Pod::Usage;
-use Math::Trig;
-use warnings;
-use strict;
-
-# Define some common encoding vectors.
-my @standardencoding =
- ((map {"_a$_"} (0..31)),
- qw (space exclam quotedbl numbersign dollar percent ampersand
- quoteright parenleft parenright asterisk plus comma hyphen
- period slash zero one two three four five six seven eight
- nine colon semicolon less equal greater question at A B C D E
- F G H I J K L M N O P Q R S T U V W X Y Z bracketleft
- backslash bracketright asciicircum underscore quoteleft a b c
- d e f g h i j k l m n o p q r s t u v w x y z braceleft bar
- braceright asciitilde),
- (map {"_a$_"} (127..160)),
- qw (exclamdown cent sterling fraction yen florin section currency
- quotesingle quotedblleft guillemotleft guilsinglleft
- guilsinglright fi fl _a176 endash dagger daggerdbl
- periodcentered _a181 paragraph bullet quotesinglbase
- quotedblbase quotedblright guillemotright ellipsis
- perthousand _a190 questiondown _a192 grave acute circumflex
- tilde macron breve dotaccent dieresis _a201 ring cedilla
- _a204 hungarumlaut ogonek caron emdash),
- (map {"_a$_"} (209..224)),
- qw (AE _a226 ordfeminine _a228 _a229 _a230 _a231 Lslash Oslash OE
- ordmasculine _a236 _a237 _a238 _a239 _a240 ae _a242 _a243
- _a244 dotlessi _a246 _a247 lslash oslash oe germandbls _a252
- _a253 _a254 _a255));
-my @isolatin1encoding =
- ((map {"_a$_"} (0..31)),
- qw (space exclam quotedbl numbersign dollar percent ampersand
- quoteright parenleft parenright asterisk plus comma minus
- period slash zero one two three four five six seven eight
- nine colon semicolon less equal greater question at A B C D E
- F G H I J K L M N O P Q R S T U V W X Y Z bracketleft
- backslash bracketright asciicircum underscore quoteleft a b c
- d e f g h i j k l m n o p q r s t u v w x y z braceleft bar
- braceright asciitilde),
- (map {"_a$_"} (128..143)),
- qw (dotlessi grave acute circumflex tilde macron breve dotaccent
- dieresis _a153 ring cedilla _a156 hungarumlaut ogonek
- caron space exclamdown cent sterling currency yen brokenbar
- section dieresis copyright ordfeminine guillemotleft
- logicalnot hyphen registered macron degree plusminus
- twosuperior threesuperior acute mu paragraph periodcentered
- cedilla onesuperior ordmasculine guillemotright onequarter
- onehalf threequarters questiondown Agrave Aacute Acircumflex
- Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex
- Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde
- Ograve Oacute Ocircumflex Otilde Odieresis multiply Oslash
- Ugrave Uacute Ucircumflex Udieresis Yacute Thorn germandbls
- agrave aacute acircumflex atilde adieresis aring ae ccedilla
- egrave eacute ecircumflex edieresis igrave iacute icircumflex
- idieresis eth ntilde ograve oacute ocircumflex otilde
- odieresis divide oslash ugrave uacute ucircumflex udieresis
- yacute thorn ydieresis));
-my @ot1encoding =
- qw (Gamma Delta Theta Lambda Xi Pi Sigma Upsilon Phi
- Psi Omega ff fi fl ffi ffl dotlessi dotlessj grave acute caron
- breve macron ring cedilla germandbls ae oe oslash AE OE Oslash
- suppress exclam quotedblright numbersign dollar percent
- ampersand quoteright parenleft parenright asterisk plus comma
- hyphen period slash zero one two three four five six seven
- eight nine colon semicolon exclamdown equal questiondown
- question at A B C D E F G H I J K L M N O P Q R S T U V W X Y
- Z bracketleft quotedblleft bracketright circumflex dotaccent
- quoteleft a b c d e f g h i j k l m n o p q r s t u v w x y z
- endash emdash hungarumlaut tilde dieresis);
-my @t1encoding =
- qw (grave acute circumflex tilde dieresis hungarumlaut ring caron
- breve macron dotaccent cedilla ogonek quotesinglbase
- guilsinglleft guilsinglright quotedblleft quotedblright
- quotedblbase guillemotleft guillemotright endash emdash cwm
- perthousand dotlessi dotlessj ff fi fl ffi ffl space exclam
- quotedbl numbersign dollar percent ampersand quoteright
- parenleft parenright asterisk plus comma hyphen period slash
- zero one two three four five six seven eight nine colon
- semicolon less equal greater question at A B C D E F G H I J K L
- M N O P Q R S T U V W X Y Z bracketleft backslash bracketright
- asciicircum underscore quoteleft a b c d e f g h i j k l m n o p
- q r s t u v w x y z braceleft bar braceright asciitilde
- sfthyphen Abreve Aogonek Cacute Ccaron Dcaron Ecaron Eogonek
- Gbreve Lacute Lcaron Lslash Nacute Ncaron Eng Ohungarumlaut
- Racute Rcaron Sacute Scaron Scedilla Tcaron Tcedilla
- Uhungarumlaut Uring Ydieresis Zacute Zcaron Zdotaccent IJ
- Idotaccent dcroat section abreve aogonek cacute ccaron dcaron
- ecaron eogonek gbreve lacute lcaron lslash nacute ncaron eng
- ohungarumlaut racute rcaron sacute scaron scedilla tcaron
- tcedilla uhungarumlaut uring ydieresis zacute zcaron zdotaccent
- ij exclamdown questiondown sterling Agrave Aacute Acircumflex
- Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex
- Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde Ograve
- Oacute Ocircumflex Otilde Odieresis OE Oslash Ugrave Uacute
- Ucircumflex Udieresis Yacute Thorn SS agrave aacute acircumflex
- atilde adieresis aring ae ccedilla egrave eacute ecircumflex
- edieresis igrave iacute icircumflex idieresis eth ntilde ograve
- oacute ocircumflex otilde odieresis oe oslash ugrave uacute
- ucircumflex udieresis yacute thorn germandbls);
-
-# Define font parameters that the user can override.
-my $fontversion;
-my $creationdate;
-my $comment;
-my $familyname;
-my $weight;
-my $fullname;
-my $fixedpitch;
-my $italicangle;
-my $underlinepos;
-my $underlinethick;
-my $fontname;
-my $uniqueID;
-my $designsize;
-my ($mffile, $pt1file, $pfbfile, $ffscript);
-my $encoding;
-my $rounding;
-my $bpppix;
-
-# Define all of our other global variables.
-my $progname = basename $0, ".pl";
-my $mag;
-my @fontbbox;
-my @charbbox;
-my @charwd;
-my @glyphname;
-my @charfiles;
-my $filebase;
-my $filedir;
-my $filenoext;
-my $versionmsg = "mf2pt1 version $VERSION
-
-Copyright (C) 2008 Scott Pakin
-
-This program may be distributed and/or modified under the conditions
-of the LaTeX Project Public License, either version 1.3c of this
-license or (at your option) any later version.
-
-The latest version of this license is in:
-
- http://www.latex-project.org/lppl.txt
-
-and version 1.3c or later is part of all distributions of LaTeX
-version 2006/05/20 or later.
-";
-
-
-######################################################################
-
-# The routines to compute the fractional approximation of a real number
-# are heavily based on code posted by Ben Tilly
-# <http://www.perlmonks.org/?node_id=26179> on Nov 16th, 2000, to the
-# PerlMonks list. See <http://www.perlmonks.org/index.pl?node_id=41961>.
-
-
-# Takes numerator/denominator pairs.
-# Returns a PS fraction string representation (with a trailing space).
-sub frac_string (@)
-{
- my $res = "";
-
- while (@_) {
- my $n = shift;
- my $d = shift;
- $res .= $n . " ";
- $res .= $d . " div " if $d > 1;
- }
-
- return $res;
-}
-
-
-# Takes a number.
-# Returns a numerator and denominator with the smallest denominator
-# so that the difference of the resulting fraction to the number is
-# smaller or equal to $rounding.
-sub frac_approx ($)
-{
- my $num = shift;
- my $f = ret_frac_iter ($num);
-
- while (1) {
- my ($n, $m) = $f->();
- my $approx = $n / $m;
- my $delta = abs ($num - $approx);
- return ($n, $m) if ($delta <= $rounding);
- }
-}
-
-
-# Takes a number, returns the best integer approximation and (in list
-# context) the error.
-sub best_int ($)
-{
- my $x = shift;
- my $approx = sprintf '%.0f', $x;
- if (wantarray) {
- return ($approx, $x - $approx);
- }
- else {
- return $approx;
- }
-}
-
-
-# Takes a numerator and denominator, in scalar context returns
-# the best fraction describing them, in list the numerator and
-# denominator.
-sub frac_standard ($$)
-{
- my $n = best_int(shift);
- my $m = best_int(shift);
- my $k = gcd($n, $m);
- $n /= $k;
- $m /= $k;
- if ($m < 0) {
- $n *= -1;
- $m *= -1;
- }
- if (wantarray) {
- return ($n, $m);
- }
- else {
- return "$n/$m";
- }
-}
-
-
-# Euclidean algorithm for calculating a GCD.
-# Takes two integers, returns the greatest common divisor.
-sub gcd ($$)
-{
- my ($n, $m) = @_;
- while ($m) {
- my $k = $n % $m;
- ($n, $m) = ($m, $k);
- }
- return $n;
-}
-
-
-# Takes a list of terms in a continued fraction, and converts it
-# into a fraction.
-sub ints_to_frac (@)
-{
- my ($n, $m) = (0, 1); # Start with 0
- while (@_) {
- my $k = pop;
- if ($n) {
- # Want frac for $k + 1/($n/$m)
- ($n, $m) = frac_standard($k*$n + $m, $n);
- }
- else {
- # Want $k
- ($n, $m) = frac_standard($k, 1);
- }
- }
- return frac_standard($n, $m);
-}
-
-
-# Takes a number, returns an anon sub which iterates through a set of
-# fractional approximations that converges very quickly to the number.
-sub ret_frac_iter ($)
-{
- my $x = shift;
- my $term_iter = ret_next_term_iter($x);
- my @ints;
- return sub {
- push @ints, $term_iter->();
- return ints_to_frac(@ints);
- }
-}
-
-
-# Terms of a continued fraction converging on that number.
-sub ret_next_term_iter ($)
-{
- my $x = shift;
- return sub {
- (my $n, $x) = best_int($x);
- if (0 != $x) {
- $x = 1/$x;
- }
- return $n;
- }
-}
-
-######################################################################
-
-# Round a number to the nearest integer.
-sub round ($)
-{
- return int($_[0] + 0.5*($_[0] <=> 0));
-}
-
-
-# Round a number to a given precision.
-sub prec ($)
-{
- return round ($_[0] / $rounding) * $rounding;
-}
-
-
-# Set a variable's value to the first defined value in the given list.
-# If the variable was not previously defined and no value in the list
-# is defined, do nothing.
-sub assign_default (\$@)
-{
- my $varptr = shift; # Pointer to variable to define
- return if defined $$varptr && $$varptr ne "UNSPECIFIED";
- foreach my $val (@_) {
- next if !defined $val;
- $$varptr = $val;
- return;
- }
-}
-
-
-# Print and execute a shell command. An environment variable with the
-# same name as the command overrides the command name. Return 1 on
-# success, 0 on failure. Optionally abort if the command fails, based
-# on the first argument to execute_command.
-sub execute_command ($@)
-{
- my $abort_on_failure = shift;
- my @command = @_;
- $command[0] = $ENV{uc $command[0]} || $command[0];
- my $prettyargs = join (" ", map {/[\\ ]/ ? "'$_'" : $_} @command);
- print "Invoking \"$prettyargs\"...\n";
- my $result = system @command;
- die "${progname}: \"$prettyargs\" failed ($!)\n" if $result && $abort_on_failure;
- return !$result;
-}
-
-
-# Output the font header.
-sub output_header ()
-{
- # Show the initial boilerplate.
- print OUTFILE <<"ENDHEADER";
-%!FontType1-1.0: $fontname $fontversion
-%%CreationDate: $creationdate
-% Font converted to Type 1 by mf2pt1, written by Scott Pakin.
-11 dict begin
-/FontInfo 11 dict dup begin
-/version ($fontversion) readonly def
-/Notice ($comment) readonly def
-/FullName ($fullname) readonly def
-/FamilyName ($familyname) readonly def
-/Weight ($weight) readonly def
-/ItalicAngle $italicangle def
-/isFixedPitch $fixedpitch def
-/UnderlinePosition $underlinepos def
-/UnderlineThickness $underlinethick def
-end readonly def
-/FontName /$fontname def
-ENDHEADER
-
- # If we're not using an encoding that PostScript knows about, then
- # create an encoding vector.
- if ($encoding==\@standardencoding) {
- print OUTFILE "/Encoding StandardEncoding def\n";
- }
- else {
- print OUTFILE "/Encoding 256 array\n";
- print OUTFILE "0 1 255 {1 index exch /.notdef put} for\n";
- foreach my $charnum (0 .. $#{$encoding}) {
- if ($encoding->[$charnum] && $encoding->[$charnum]!~/^_a\d+$/) {
- print OUTFILE "dup $charnum /$encoding->[$charnum] put\n";
- }
- }
- print OUTFILE "readonly def\n";
- }
-
- # Show the final boilerplate.
- print OUTFILE <<"ENDHEADER";
-/PaintType 0 def
-/FontType 1 def
-/FontMatrix [0.001 0 0 0.001 0 0] readonly def
-/UniqueID $uniqueID def
-/FontBBox{@fontbbox}readonly def
-currentdict end
-currentfile eexec
-dup /Private 5 dict dup begin
-/RD{string currentfile exch readstring pop}executeonly def
-/ND{noaccess def}executeonly def
-/NP{noaccess put}executeonly def
-ENDHEADER
-}
-
-
-# Use MetaPost to generate one PostScript file per character. We
-# calculate the font bounding box from these characters and store them
-# in @fontbbox. If the input parameter is 1, set other font
-# parameters, too.
-sub get_bboxes ($)
-{
- execute_command 1, ("mpost", "-mem=mf2pt1", "-progname=mpost",
- "\\mode:=localfont; mag:=$mag; bpppix $bpppix; input $mffile");
- opendir (CURDIR, ".") || die "${progname}: $! ($filedir)\n";
- @charfiles = sort
- { ($a=~ /\.(\d+)$/)[0] <=> ($b=~ /\.(\d+)$/)[0] }
- grep /^$filebase.*\.\d+$/, readdir(CURDIR);
- close CURDIR;
- @fontbbox = (1000000, 1000000, -1000000, -1000000);
- foreach my $psfile (@charfiles) {
- # Read the character number from the output file's extension.
- $psfile =~ /\.(\d+)$/;
- my $charnum = $1;
-
- # Process in turn each line of the current PostScript file.
- my $havebbox = 0;
- open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n";
- while (<PSFILE>) {
- my @tokens = split " ";
- if ($tokens[0] eq "%%BoundingBox:") {
- # Store the MetaPost-produced bounding box, just in case
- # the given font doesn't use beginchar.
- @tokens = ("%", "MF2PT1:", "glyph_dimensions", @tokens[1..4]);
- $havebbox--;
- }
- next if $#tokens<1 || $tokens[1] ne "MF2PT1:";
-
- # Process a "special" inserted into the generated PostScript.
- MF2PT1_CMD:
- {
- # glyph_dimensions llx lly urx ury -- specified glyph dimensions
- $tokens[2] eq "glyph_dimensions" && do {
- my @bbox = @tokens[3..6];
- $fontbbox[0]=$bbox[0] if $bbox[0]<$fontbbox[0];
- $fontbbox[1]=$bbox[1] if $bbox[1]<$fontbbox[1];
- $fontbbox[2]=$bbox[2] if $bbox[2]>$fontbbox[2];
- $fontbbox[3]=$bbox[3] if $bbox[3]>$fontbbox[3];
- $charbbox[$charnum] = \@bbox;
- $havebbox++;
- last MF2PT1_CMD;
- };
-
- # If all we want is the bounding box, exit the loop now.
- last MF2PT1_CMD if !$_[0];
-
- # glyph_name name -- glyph name
- $tokens[2] eq "glyph_name" && do {
- $glyphname[$charnum] = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # charwd wd -- character width as in TFM
- $tokens[2] eq "charwd" && do {
- $charwd[$charnum] = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_identifier name -- full font name
- $tokens[2] eq "font_identifier" && do {
- $fullname = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_size number -- font design size (pt, not bp)
- $tokens[2] eq "font_size" && $tokens[3] && do {
- $designsize = $tokens[3] * 72 / 72.27;
- last MF2PT1_CMD;
- };
-
- # font_slant number -- italic amount
- $tokens[2] eq "font_slant" && do {
- $italicangle = 0 + rad2deg (atan(-$tokens[3]));
- last MF2PT1_CMD;
- };
-
- # font_coding_scheme string -- font encoding
- $tokens[2] eq "font_coding_scheme" && do {
- $encoding = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_version string -- font version number (xxx.yyy)
- $tokens[2] eq "font_version" && do {
- $fontversion = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_comment string -- font comment notice
- $tokens[2] eq "font_comment" && do {
- $comment = join (" ", @tokens[3..$#tokens]);
- last MF2PT1_CMD;
- };
-
- # font_family string -- font family name
- $tokens[2] eq "font_family" && do {
- $familyname = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_weight string -- font weight (e.g., "Book" or "Heavy")
- $tokens[2] eq "font_weight" && do {
- $weight = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_fixed_pitch number -- fixed width font (0=false, 1=true)
- $tokens[2] eq "font_fixed_pitch" && do {
- $fixedpitch = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_underline_position number -- vertical underline position
- $tokens[2] eq "font_underline_position" && do {
- # We store $underlinepos in points and later
- # scale it by 1000/$designsize.
- $underlinepos = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_underline_thickness number -- thickness of underline
- $tokens[2] eq "font_underline_thickness" && do {
- # We store $underlinethick in points and later
- # scale it by 1000/$designsize.
- $underlinethick = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_name string -- font name
- $tokens[2] eq "font_name" && do {
- $fontname = $tokens[3];
- last MF2PT1_CMD;
- };
-
- # font_unique_id number (as string) -- globally unique font ID
- $tokens[2] eq "font_unique_id" && do {
- $uniqueID = 0+$tokens[3];
- last MF2PT1_CMD;
- };
- }
- }
- close PSFILE;
- if (!$havebbox) {
- warn "${progname}: No beginchar in character $charnum; glyph dimensions are probably incorrect\n";
- }
- }
-}
-
-
-# Convert ordinary, MetaPost-produced PostScript files into Type 1
-# font programs.
-sub output_font_programs ()
-{
- # Iterate over all the characters. We convert each one, line by
- # line and token by token.
- print "Converting PostScript graphics to Type 1 font programs...\n";
- foreach my $psfile (@charfiles) {
- # Initialize the font program.
- $psfile =~ /\.(\d+)$/;
- my $charnum = $1;
- my $gname = $glyphname[$charnum] || $encoding->[$charnum];
- my @fontprog;
- push @fontprog, ("/$gname {",
- frac_string (frac_approx ($charbbox[$charnum]->[0]),
- frac_approx ($charwd[$charnum] * $mag))
- . "hsbw");
- my ($cpx, $cpy) =
- ($charbbox[$charnum]->[0], 0); # Current point (PostScript)
-
- # Iterate over every line in the current file.
- open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n";
- while (my $oneline=<PSFILE>) {
- next if $oneline=~/^\%/;
- next if $oneline=~/set/; # Fortunately, "set" never occurs on "good" lines.
- my @arglist; # Arguments to current PostScript function
-
- # Iterate over every token in the current line.
- TOKENLOOP:
- foreach my $token (split " ", $oneline) {
- # Number: Round and push on the argument list.
- $token =~ /^[-.\d]+$/ && do {
- push @arglist, prec ($&);
- next TOKENLOOP;
- };
-
- # curveto: Convert to vhcurveto, hvcurveto, or rrcurveto.
- $token eq "curveto" && do {
- my ($dx1, $dy1) = ($arglist[0] - $cpx,
- $arglist[1] - $cpy);
- my ($dx1n, $dx1d) = frac_approx ($dx1);
- my ($dy1n, $dy1d) = frac_approx ($dy1);
- $cpx += $dx1n / $dx1d;
- $cpy += $dy1n / $dy1d;
-
- my ($dx2, $dy2) = ($arglist[2] - $cpx,
- $arglist[3] - $cpy);
- my ($dx2n, $dx2d) = frac_approx ($dx2);
- my ($dy2n, $dy2d) = frac_approx ($dy2);
- $cpx += $dx2n / $dx2d;
- $cpy += $dy2n / $dy2d;
-
- my ($dx3, $dy3) = ($arglist[4] - $cpx,
- $arglist[5] - $cpy);
- my ($dx3n, $dx3d) = frac_approx ($dx3);
- my ($dy3n, $dy3d) = frac_approx ($dy3);
- $cpx += $dx3n / $dx3d;
- $cpy += $dy3n / $dy3d;
-
- if (!$dx1n && !$dy3n) {
- push @fontprog, frac_string ($dy1n, $dy1d,
- $dx2n, $dx2d,
- $dy2n, $dy2d,
- $dx3n, $dx3d)
- . "vhcurveto";
- }
- elsif (!$dy1n && !$dx3n) {
- push @fontprog, frac_string ($dx1n, $dx1d,
- $dx2n, $dx2d,
- $dy2n, $dy2d,
- $dy3n, $dy3d)
- . "hvcurveto";
- }
- else {
- push @fontprog, frac_string ($dx1n, $dx1d,
- $dy1n, $dy1d,
- $dx2n, $dx2d,
- $dy2n, $dy2d,
- $dx3n, $dx3d,
- $dy3n, $dy3d)
- . "rrcurveto";
- }
- next TOKENLOOP;
- };
-
- # lineto: Convert to vlineto, hlineto, or rlineto.
- $token eq "lineto" && do {
- my ($dx, $dy) = ($arglist[0] - $cpx,
- $arglist[1] - $cpy);
- my ($dxn, $dxd) = frac_approx ($dx);
- my ($dyn, $dyd) = frac_approx ($dy);
- $cpx += $dxn / $dxd;
- $cpy += $dyn / $dyd;
-
- if (!$dxn) {
- push @fontprog, frac_string ($dyn, $dyd)
- . "vlineto" if $dyn;
- }
- elsif (!$dyn) {
- push @fontprog, frac_string ($dxn, $dxd)
- . "hlineto";
- }
- else {
- push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd)
- . "rlineto";
- }
- next TOKENLOOP;
- };
-
- # moveto: Convert to vmoveto, hmoveto, or rmoveto.
- $token eq "moveto" && do {
- my ($dx, $dy) = ($arglist[0] - $cpx,
- $arglist[1] - $cpy);
- my ($dxn, $dxd) = frac_approx ($dx);
- my ($dyn, $dyd) = frac_approx ($dy);
- $cpx += $dxn / $dxd;
- $cpy += $dyn / $dyd;
-
- if (!$dxn) {
- push @fontprog, frac_string ($dyn, $dyd)
- . "vmoveto";
- }
- elsif (!$dyn) {
- push @fontprog, frac_string ($dxn, $dxd)
- . "hmoveto";
- }
- else {
- push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd)
- . "rmoveto";
- }
- next TOKENLOOP;
- };
-
- # closepath: Output as is.
- $token eq "closepath" && do {
- push @fontprog, $token;
- next TOKENLOOP;
- };
- }
- }
- close PSFILE;
- push @fontprog, ("endchar",
- "} ND");
- print OUTFILE join ("\n\t", @fontprog), "\n";
- }
-}
-
-
-# Output the final set of code for the Type 1 font.
-sub output_trailer ()
-{
- print OUTFILE <<"ENDTRAILER";
-/.notdef {
- 0 @{[$fontbbox[2]-$fontbbox[0]]} hsbw
- endchar
- } ND
-end
-end
-readonly put
-noaccess put
-dup/FontName get exch definefont pop
-mark currentfile closefile
-cleartomark
-ENDTRAILER
-}
-
-######################################################################
-
-# Parse the command line. Asterisks in the following represents
-# commands also defined by Plain Metafont.
-my %opthash = ();
-GetOptions (\%opthash,
- "fontversion=s", # font_version
- "comment=s", # font_comment
- "family=s", # font_family
- "weight=s", # font_weight
- "fullname=s", # font_identifier (*)
- "fixedpitch!", # font_fixed_pitch
- "italicangle=f", # font_slant (*)
- "underpos=f", # font_underline_position
- "underthick=f", # font_underline_thickness
- "name=s", # font_name
- "uniqueid=i", # font_unique_id
- "designsize=f", # font_size (*)
- "encoding=s", # font_coding_scheme (*)
- "rounding=f",
- "bpppix=f",
- "ffscript=s",
- "h|help",
- "V|version") || pod2usage(2);
-if (defined $opthash{"h"}) {
- pod2usage(-verbose => 1,
- -output => \*STDOUT, # Bug workaround for Pod::Usage
- -exitval => "NOEXIT");
- print "Please e-mail bug reports to scott+mf\@pakin.org.\n";
- exit 1;
-}
-do {print $versionmsg; exit 1} if defined $opthash{"V"};
-pod2usage(2) if $#ARGV != 0;
-
-# Extract the filename from the command line.
-$mffile = $ARGV[0];
-my @fileparts = fileparse $mffile, ".mf";
-$filebase = $fileparts[0];
-$filedir = $fileparts[1];
-$filenoext = File::Spec->catfile ($filedir, $filebase);
-$pt1file = $filebase . ".pt1";
-$pfbfile = $filebase . ".pfb";
-
-assign_default $bpppix, $opthash{bpppix}, 0.02;
-
-# Make our first pass through the input, to set values for various options.
-$mag = 100; # Get a more precise bounding box.
-get_bboxes(1); # This might set $designsize.
-
-# Sanity-check the specified precision.
-assign_default $rounding, $opthash{rounding}, 1;
-if ($rounding<=0.0 || $rounding>1.0) {
- die sprintf "%s: Invalid rounding amount \"%g\"; value must be a positive number no greater than 1.0\n", $progname, $rounding;
-}
-
-# Ensure that every user-definable parameter is assigned a value.
-assign_default $fontversion, $opthash{fontversion}, "001.000";
-assign_default $creationdate, scalar localtime;
-assign_default $comment, $opthash{comment}, "Font converted to Type 1 by mf2pt1, written by Scott Pakin.";
-assign_default $weight, $opthash{weight}, "Medium";
-assign_default $fixedpitch, $opthash{fixedpitch}, 0;
-assign_default $uniqueID, $opthash{uniqueid}, int(rand(1000000)) + 4000000;
-assign_default $designsize, $opthash{designsize};
-die "${progname}: a design size must be specified in $mffile or on the command line\n" if !defined $designsize;
-die "${progname}: the design size must be a positive number\n" if $designsize<=0.0;
-assign_default $underlinepos, $opthash{underpos}, -1;
-$underlinepos = round(1000*$underlinepos/$designsize);
-assign_default $underlinethick, $opthash{underthick}, 0.5;
-$underlinethick = round(1000*$underlinethick/$designsize);
-assign_default $fullname, $opthash{fullname}, $filebase;
-assign_default $familyname, $opthash{family}, $fullname;
-assign_default $italicangle, $opthash{italicangle}, 0;
-assign_default $fontname, $opthash{name}, "$familyname-$weight";
-$fontname =~ s/\s//g;
-assign_default $encoding, $opthash{encoding}, "standard";
-my $encoding_name = $encoding;
-ENCODING:
-{
- if (-e $encoding) {
- # Filenames take precedence over built-in encodings.
- my @enc_array;
- open (ENCFILE, "<$encoding") || die "${progname}: $! ($encoding)\n";
- while (my $oneline = <ENCFILE>) {
- $oneline =~ s/\%.*$//;
- foreach my $word (split " ", $oneline) {
- push @enc_array, substr($word, 1) if substr($word, 0, 1) eq "/";
- }
- }
- close ENCFILE;
- $encoding_name = substr (shift @enc_array, 1);
- $encoding = \@enc_array;
- last ENCODING;
- }
- $encoding=\@standardencoding, last ENCODING if $encoding eq "standard";
- $encoding=\@isolatin1encoding, last ENCODING if $encoding eq "isolatin1";
- $encoding=\@ot1encoding, last ENCODING if $encoding eq "ot1";
- $encoding=\@t1encoding, last ENCODING if $encoding eq "t1";
- $encoding=\@glyphname, last ENCODING if $encoding eq "asis";
- warn "${progname}: Unknown encoding \"$encoding\"; using standard Adobe encoding\n";
- $encoding=\@standardencoding; # Default to standard encoding
-}
-assign_default $fixedpitch, $opthash{fixedpitch}, 0;
-$fixedpitch = $fixedpitch ? "true" : "false";
-assign_default $ffscript, $opthash{ffscript};
-
-# Output the final values of all of our parameters.
-print "\n";
-print <<"PARAMVALUES";
-mf2pt1 is using the following font parameters:
- font_version: $fontversion
- font_comment: $comment
- font_family: $familyname
- font_weight: $weight
- font_identifier: $fullname
- font_fixed_pitch: $fixedpitch
- font_slant: $italicangle
- font_underline_position: $underlinepos
- font_underline_thickness: $underlinethick
- font_name: $fontname
- font_unique_id: $uniqueID
- font_size: $designsize (bp)
- font_coding_scheme: $encoding_name
-PARAMVALUES
- ;
-print "\n";
-
-# Scale by a factor of 1000/design size.
-$mag = 1000.0 / $designsize;
-get_bboxes(0);
-print "\n";
-
-# Output the font in disassembled format.
-open (OUTFILE, ">$pt1file") || die "${progname}: $! ($pt1file)\n";
-output_header();
-printf OUTFILE "2 index /CharStrings %d dict dup begin\n",
- 1+scalar(grep {defined($_)} @charbbox);
-output_font_programs();
-output_trailer();
-close OUTFILE;
-unlink @charfiles;
-print "\n";
-
-# Convert from the disassembled font format to Type 1 binary format.
-if (!execute_command 0, ("t1asm", $pt1file, $pfbfile)) {
- die "${progname}: You'll need either to install t1utils and rerun $progname or find another way to convert $pt1file to $pfbfile\n";
- exit 1;
-}
-print "\n";
-unlink $pt1file;
-
-# Use FontForge to autohint the result.
-my $user_script = 0; # 1=script file was provided by the user; 0=created here
-if (defined $ffscript) {
- # The user provided his own script.
- $user_script = 1;
-}
-else {
- # Create a FontForge script file.
- $ffscript = $filebase . ".pe";
- open (FFSCRIPT, ">$ffscript") || die "${progname}: $! ($ffscript)\n";
- print FFSCRIPT <<'AUTOHINT';
-Open($1);
-SelectAll();
-RemoveOverlap();
-AddExtrema();
-Simplify(0, 2);
-CorrectDirection();
-Simplify(0, 2);
-RoundToInt();
-AutoHint();
-Generate($1);
-Quit(0);
-AUTOHINT
- ;
- close FFSCRIPT;
-}
-if (!execute_command 0, ("fontforge", "-script", $ffscript, $pfbfile)) {
- warn "${progname}: You'll need to install FontForge if you want $pfbfile autohinted (not required, but strongly recommended)\n";
-}
-unlink $ffscript if !$user_script;
-print "\n";
-
-# Finish up.
-print "*** Successfully generated $pfbfile! ***\n";
-exit 0;
-
-######################################################################
-
-__END__
-
-=head1 NAME
-
-mf2pt1 - produce a PostScript Type 1 font program from a Metafont source
-
-
-=head1 SYNOPSIS
-
-mf2pt1
-[B<--help>]
-[B<--version>]
-[B<--comment>=I<string>]
-[B<--designsize>=I<number>]
-[B<--encoding>=I<encoding>]
-[B<--family>=I<name>]
-[B<-->[B<no>]B<fixedpitch>]
-[B<--fontversion>=I<MMM.mmm>]
-[B<--fullname>=I<name>]
-[B<--italicangle>=I<number>]
-[B<--name>=I<name>]
-[B<--underpos>=I<number>]
-[B<--underthick>=I<number>]
-[B<--uniqueid>=I<number>]
-[B<--weight>=I<weight>]
-[B<--rounding>=I<number>]
-[B<--bpppix>=I<number>]
-[B<--ffscript>=I<file.pe>]
-I<infile>.mf
-
-
-=head1 WARNING
-
-The B<mf2pt1> Info file is the main source of documentation for
-B<mf2pt1>. This man page is merely a brief summary.
-
-
-=head1 DESCRIPTION
-
-B<mf2pt1> facilitates producing PostScript Type 1 fonts from a
-Metafont source file. It is I<not>, as the name may imply, an
-automatic converter of arbitrary Metafont fonts to Type 1 format.
-B<mf2pt1> imposes a number of restrictions on the Metafont input. If
-these restrictions are met, B<mf2pt1> will produce valid Type 1
-output. (Actually, it produces "disassembled" Type 1; the B<t1asm>
-program from the B<t1utils> suite will convert this to a true Type 1
-font.)
-
-=head2 Usage
-
- mf2pt1 myfont.mf
-
-=head1 OPTIONS
-
-Font parameters are best specified within a Metafont program. If
-necessary, though, command-line options can override any of these
-parameters. The B<mf2pt1> Info page, the primary source of B<mf2pt1>
-documentation, describes the following in greater detail.
-
-=over 4
-
-=item B<--help>
-
-Provide help on B<mf2pt1>'s command-line options.
-
-=item B<--version>
-
-Output the B<mf2pt1> version number, copyright, and license.
-
-=item B<--comment>=I<string>
-
-Include a font comment, usually a copyright notice.
-
-=item B<--designsize>=I<number>
-
-Specify the font design size in points.
-
-=item B<--encoding>=I<encoding>
-
-Designate the font encoding, either the name of a---typically
-F<.enc>---file which contains a PostScript font-encoding vector or one
-of C<standard> (the default), C<ot1>, C<t1>, or C<isolatin1>.
-
-=item B<--family>=I<name>
-
-Specify the font family.
-
-=item B<--fixedpitch>, B<--nofixedpitch>
-
-Assert that the font uses either monospaced (B<--fixedpitch>) or
-proportional (B<--nofixedpitch>) character widths.
-
-=item B<--fontversion>=I<MMM.mmm>
-
-Specify the font's major and minor version number.
-
-=item B<--fullname>=I<name>
-
-Designate the full font name (family plus modifiers).
-
-=item B<--italicangle>=I<number>
-
-Designate the italic angle in degrees counterclockwise from vertical.
-
-=item B<--name>=I<name>
-
-Provide the font name.
-
-=item B<--underpos>=I<number>
-
-Specify the vertical position of the underline in thousandths of the
-font height.
-
-=item B<--underthick>=I<number>
-
-Specify the thickness of the underline in thousandths of the font
-height.
-
-=item B<--uniqueid>=I<number>
-
-Specify a globally unique font identifier.
-
-=item B<--weight>=I<weight>
-
-Provide a description of the font weight (e.g., ``Heavy'').
-
-=item B<--rounding>=I<number>
-
-Specify the fraction of a font unit (0.0 < I<number> <= 1.0) to which
-to round coordinate values [default: 1.0].
-
-=item B<--bpppix>=I<number>
-
-Redefine the number of big points per pixel from 0.02 to I<number>.
-
-=item B<--ffscript>=I<file.pe>
-
-Name a script to pass to FontForge.
-
-=back
-
-
-=head1 FILES
-
-F<mf2pt1.mem> (which is generated from F<mf2pt1.mp> and F<mfplain.mp>)
-
-
-=head1 NOTES
-
-As stated in L</"WARNING">, the complete source of documentation for
-B<mf2pt1> is the Info page, not this man page.
-
-
-=head1 SEE ALSO
-
-mf(1), mpost(1), t1asm(1), fontforge(1)
-
-
-=head1 AUTHOR
-
-Scott Pakin, I<scott+mf@pakin.org>
+++ /dev/null
-#!@PYTHON@
-
-import re
-import os
-
-def new_link_path (link, dir, r):
- l = link.split ('/')
- d = dir.split ('/')
- i = 0
- while i < len(d) and i < len(l) and l[i] == '..':
- if r.match (d[i]):
- del l[i]
- else:
- i += 1
- return '/'.join ([x for x in l if not r.match (x)])
-
-def walk_tree (tree_roots = [],
- process_dirs = '.*',
- exclude_dirs = '',
- find_files = '.*',
- exclude_files = ''):
- """Walk directory trees and.returns (dirs, symlinks, files, extra_files) tuple.
-
- Arguments:
- tree_roots=DIRLIST use DIRLIST as tree roots list
- process_dir=PATTERN only process files in directories named PATTERN
- exclude_dir=PATTERN don't recurse into directories named PATTERN
- find_files=PATTERN filters files which are hardlinked
- exclude_files=PATTERN exclude files named PATTERN
- """
- find_files_re = re.compile (find_files)
- exclude_dirs_re = re.compile (exclude_dirs)
- exclude_files_re = re.compile (exclude_files)
- process_dirs_re = re.compile (process_dirs)
-
- dirs_paths = []
- symlinks_paths = []
- files_paths = []
-
- for d in tree_roots:
- for current_dir, dirs, files in os.walk(d):
- i = 0
- while i < len(dirs):
- if exclude_dirs_re.search (os.path.join (current_dir, dirs[i])):
- del dirs[i]
- else:
- p = os.path.join (current_dir, dirs[i])
- if os.path.islink (p):
- symlinks_paths.append (p)
- i += 1
- if not process_dirs_re.search (current_dir):
- continue
- dirs_paths.append (current_dir)
- for f in files:
- if exclude_files_re.match (f):
- continue
- p = os.path.join (current_dir, f)
- if os.path.islink (p):
- symlinks_paths.append (p)
- elif find_files_re.match (f):
- files_paths.append (p)
- return (dirs_paths, symlinks_paths, files_paths)
+++ /dev/null
-#!/usr/bin/env python
-
-notes = "CDEFGAB"
-alterations = [-1, 0, 1]
-
-def print_note (octave, note, alteration):
- print " <note>\n <pitch>\n <step>%s</step>" % notes[note]
- if alteration <> 0:
- print " <alter>%s</alter>" % alteration
- print " <octave>%s</octave>\n </pitch>\n <duration>1</duration>\n <voice>1</voice>\n <type>quarter</type>\n </note>" % octave
-
-
-print """<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
- "http://www.musicxml.org/dtds/partwise.dtd">
-<score-partwise>
- <movement-title>Various piches and interval sizes</movement-title>
- <part-list>
- <score-part id="P1">
- <part-name>MusicXML Part</part-name>
- </score-part>
- </part-list>
- <!--=========================================================-->
- <part id="P1">
- <measure number="1">
- <attributes>
- <divisions>1</divisions>
- <key>
- <fifths>0</fifths>
- <mode>major</mode>
- </key>
- <time symbol="common">
- <beats>2</beats>
- <beat-type>4</beat-type>
- </time>
- <clef>
- <sign>G</sign>
- <line>2</line>
- </clef>
- </attributes>
-"""
-
-start_octave = 5
-
-for octave in (start_octave, start_octave+1):
- for note in (0,1,2,3,4,5,6):
- for alteration in alterations:
- if octave == start_octave and note == 0 and alteration == -1:
- continue
- print_note (octave, note, alteration)
-# if octave == start_octave and note == 0 and alteration == 0:
-# continue
- print_note (start_octave-(octave-start_octave)-(1-(7-note)/7), (7-note)%7, -alteration)
-
-print """ </measure>
- </part>
-</score-partwise>
-"""
+++ /dev/null
-#!/usr/bin/env python
-
-notes = "CDEFGAB"
-alterations = [-1, 0, 1]
-
-def print_measure (nr, fifth, mode, atts1 = "", atts = "", final = ""):
- print """ <measure number="%s">
- <attributes>
-%s <key>
- <fifths>%s</fifths>
- <mode>%s</mode>
- </key>
-%s </attributes>
- <note>
- <pitch>
- <step>C</step>
- <octave>4</octave>
- </pitch>
- <duration>2</duration>
- <voice>1</voice>
- <type>half</type>
- </note>
-%s </measure>""" % (nr, atts1, fifth, mode, atts, final)
-
-first_div = """ <divisions>1</divisions>
-"""
-first_atts = """ <time symbol="common">
- <beats>2</beats>
- <beat-type>4</beat-type>
- </time>
- <clef>
- <sign>G</sign>
- <line>2</line>
- </clef>
-"""
-
-final_barline = """ <barline location="right">
- <bar-style>light-heavy</bar-style>
- </barline>
-"""
-
-print """<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
- "http://www.musicxml.org/dtds/partwise.dtd">
-<score-partwise>
- <movement-title>Different Key signatures</movement-title>
- <identification>
- <miscellaneous>
- <miscellaneous-field name="description">Various key signature: from 11
- flats to 11 sharps (each one first one measure in major, then one
- measure in minor)</miscellaneous-field>
- </miscellaneous>
- </identification>
- <part-list>
- <score-part id="P1">
- <part-name>MusicXML Part</part-name>
- </score-part>
- </part-list>
- <!--=========================================================-->
- <part id="P1">"""
-
-max_range = 11
-measure = 0
-for fifth in range(-max_range, max_range+1):
- measure += 1
- if fifth == -max_range:
- print_measure (measure, fifth, "major", first_div, first_atts)
- else:
- print_measure (measure, fifth, "major")
- measure += 1
- if fifth == max_range:
- print_measure (measure, fifth, "minor", "", "", final_barline)
- else:
- print_measure (measure, fifth, "minor")
-
-
-print """ </part>
-</score-partwise>"""
+++ /dev/null
-#!/usr/bin/env python
-
-notes = "CDEFGAB"
-alterations = [-1, 0, 1]
-
-dot_xml = """ <dot/>
-"""
-tie_xml = """ <tie type="%s"/>
-"""
-tie_notation_xml = """ <notations><tied type="%s"/></notations>
-"""
-
-
-def generate_note (duration, end_tie = False):
- if duration < 2:
- (notetype, dur) = ("8th", 1)
- elif duration < 4:
- (notetype, dur) = ("quarter", 2)
- elif duration < 8:
- (notetype, dur) = ("half", 4)
- else:
- (notetype, dur) = ("whole", 8)
- dur_processed = dur
- dot = ""
- if (duration - dur_processed >= dur/2):
- dot = dot_xml
- dur_processed += dur/2
- if (duration - dur_processed >= max(dur/4, 1)):
- dot += dot_xml
- dur_processed += dur/4
- tie = ""
- tie_notation = ""
- if end_tie:
- tie += tie_xml % "stop"
- tie_notation += tie_notation_xml % "stop"
- second_note = None
- if duration - dur_processed > 0:
- second_note = generate_note (duration-dur_processed, True)
- tie += tie_xml % "start"
- tie_notation += tie_notation_xml % "start"
- note = """ <note>
- <pitch>
- <step>C</step>
- <octave>5</octave>
- </pitch>
- <duration>%s</duration>
-%s <voice>1</voice>
- <type>%s</type>
-%s%s </note>""" % (dur_processed, tie, notetype, dot, tie_notation)
- if second_note:
- return "%s\n%s" % (note, second_note)
- else:
- return note
-
-def print_measure (nr, beats, type, params = "", attr = "", attr2 = "", barline = ""):
- duration = 8*beats/type
- note = generate_note (duration)
-
- print """ <measure number="%s">
- <attributes>
-%s <time%s>
- <beats>%s</beats>
- <beat-type>%s</beat-type>
- </time>
-%s </attributes>
-%s
-%s </measure>""" % (nr, attr, params, beats, type, attr2, note, barline)
-
-first_key = """ <divisions>2</divisions>
- <key>
- <fifths>0</fifths>
- <mode>major</mode>
- </key>
-"""
-first_clef = """ <clef>
- <sign>G</sign>
- <line>2</line>
- </clef>
-"""
-
-final_barline = """ <barline location="right">
- <bar-style>light-heavy</bar-style>
- </barline>
-"""
-
-print """<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
- "http://www.musicxml.org/dtds/partwise.dtd">
-<score-partwise>
- <identification>
- <miscellaneous>
- <miscellaneous-field name="description">Various time signatures: 2/2
- (alla breve), 4/4 (C), 2/2, 3/2, 2/4, 3/4, 4/4, 5/4, 3/8, 6/8,
- 12/8</miscellaneous-field>
- </miscellaneous>
- </identification>
- <part-list>
- <score-part id="P1">
- <part-name>MusicXML Part</part-name>
- </score-part>
- </part-list>
- <!--=========================================================-->
- <part id="P1">"""
-
-measure = 1
-
-print_measure (measure, 2, 2, " symbol=\"common\"", first_key, first_clef)
-measure += 1
-
-print_measure (measure, 4, 4, " symbol=\"common\"")
-measure += 1
-
-print_measure (measure, 2, 2)
-measure += 1
-
-print_measure (measure, 3, 2)
-measure += 1
-
-print_measure (measure, 2, 4)
-measure += 1
-
-print_measure (measure, 3, 4)
-measure += 1
-
-print_measure (measure, 4, 4)
-measure += 1
-
-print_measure (measure, 5, 4)
-measure += 1
-
-print_measure (measure, 3, 8)
-measure += 1
-
-print_measure (measure, 6, 8)
-measure += 1
-
-print_measure (measure, 12, 8, "", "", "", final_barline)
-measure += 1
-
-print """ </part>
-</score-partwise>"""
+++ /dev/null
-#!/usr/bin/env python
-# mutopia-index.py
-
-import fnmatch
-import getopt
-import os
-import re
-import stat
-import sys
-
-def find (pat, dir):
- f = os.popen ('find %s -name "%s"'% (dir, pat))
- lst = []
- for a in f.readlines():
- a = a[:-1]
- lst.append (a)
- return lst
-
-
-junk_prefix = 'out-www/'
-
-headertext= r"""
-
-<h1>LilyPond samples</h1>
-
-
-<p>You are looking at a page with some LilyPond samples. These files
-are also included in the distribution. The output is completely
-generated from the source file, without any further touch up.
-
-<p>
-
-The pictures are 90 dpi anti-aliased snapshots of the printed output.
-For a good impression of the quality print out the PDF file.
-"""
-
-headertext_nopics= r"""
-<p>No examples were found in this directory.
-"""
-
-#
-# FIXME breaks on multiple strings.
-#
-def read_lilypond_header (fn):
- s = open (fn).read ()
- s = re.sub ('%.*$', '', s)
- s = re.sub ('\n', ' ', s)
-
- dict = {}
- m = re.search (r"""\\header\s*{([^}]*)}""", s)
-
- if m:
- s = m.group (1)
- else:
- return dict
-
- while s:
- m = re.search (r'''\s*(\S+)\s*=\s*"([^"]+)"''', s)
- if m == None:
- s = ''
- else:
- s = s[m.end (0):]
- left = m.group (1)
- right = m.group (2)
-
- left = re.sub ('"', '', left)
- right = re.sub ('"', '', right)
- dict[left] = right
-
- return dict
-
-def help ():
- sys.stdout.write (r'''Usage: mutopia-index [OPTIONS] INFILE OUTFILE
-Generate index for mutopia.
-
-Options:
- -h, --help print this help
- -o, --output=FILE write output to file
- -s, --subdirs=DIR add subdir
- --suffix=SUF specify suffix
-
-''')
- sys.exit (0)
-
-# ugh.
-def gen_list (inputs, file_name):
- sys.stderr.write ("generating HTML list %s" % file_name)
- sys.stderr.write ('\n')
- if file_name:
- list = open (file_name, 'w')
- else:
- list = sys.stdout
- list.write ('''<html><head><title>Rendered Examples</title>
-<style type="text/css">
-hr { border:0; height:1; color: #000000; background-color: #000000; }\n
-</style>
-<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
-</head>''')
-
- list.write ('<body bgcolor=white>\n')
-
- if inputs:
- list.write (headertext)
- else:
- list.write (headertext_nopics)
-
- for ex in inputs:
- print ex
-
- (base, ext) = os.path.splitext (ex)
- (base, ext2) = os.path.splitext (base)
- ext = ext2 + ext
-
- header = read_lilypond_header (ex)
- head = header.get ('title', os.path.basename (base))
- composer = header.get ('composer', '')
- desc = header.get ('description', '')
- list.write ('<hr>\n')
- list.write ('<h1>%s</h1>\n' % head);
- if composer:
- list.write ('<h2>%s</h2>\n' % composer)
- if desc:
- list.write ('%s<p>' % desc)
- list.write ('<ul>\n')
-
- def list_item (file_name, desc, type, lst = list):
- if os.path.isfile (file_name):
- lst.write ('<li><a href="%s">%s</a>'
- % (re.sub (junk_prefix, '', file_name), desc))
-
- # FIXME: include warning if it uses \include
- # files.
-
- size = os.stat (file_name)[stat.ST_SIZE]
- kB = (size + 512) / 1024
- if kB:
- lst.write (' (%s %d kB)' % (type, kB))
- else:
- lst.write (' (%s %d characters)'
- % (type, size))
- pictures = ['jpeg', 'png', 'xpm']
- lst.write ('\n')
- else:
- print "cannot find" , `file_name`
-
- list_item (base + ext, 'The input', 'ASCII')
-
- pages_found = 0
- for page in range (1, 100):
- f = base + '-page%d.png' % page
-
- if not os.path.isfile (f):
- break
- pages_found += 1
- list_item (f, 'See a picture of page %d' % page, 'png')
-
- if pages_found == 0 and os.path.exists (base + '.png'):
- list_item (base + ".png",
- 'See a picture', 'png')
-
-
- list_item (base + '.pdf', 'Print', 'PDF')
- list_item (base + '.midi', 'Listen', 'MIDI')
- list.write ('</ul>\n');
-
- list.write ('</body></html>\n');
- list.close ()
-
-(options, files) = getopt.getopt (sys.argv[1:],
- 'ho:', ['help', 'output='])
-outfile = 'examples.html'
-
-subdirs = []
-for (o, a) in options:
- if o == '--help' or o == '-h':
- help ()
- elif o == '--output' or o == '-o':
- outfile = a
-
-dirs = []
-for f in files:
- dirs += find ('out-www', f)
-
-if not dirs:
- dirs = ['.']
-
-allfiles = []
-
-for d in dirs:
- allfiles += find ('*.ly', d)
-
-allfiles = [f for f in allfiles
- if not f.endswith ('snippet-map.ly')
- and not re.search ('lily-[0-9a-f]+', f)
- and 'musicxml' not in f]
-
-gen_list (allfiles, outfile)
+++ /dev/null
-#!@TARGET_PYTHON@
-import sys
-import optparse
-import os
-import math
-
-## so we can call directly as buildscripts/output-distance.py
-me_path = os.path.abspath (os.path.split (sys.argv[0])[0])
-sys.path.insert (0, me_path + '/../python/')
-sys.path.insert (0, me_path + '/../python/out/')
-
-
-X_AXIS = 0
-Y_AXIS = 1
-INFTY = 1e6
-
-OUTPUT_EXPRESSION_PENALTY = 1
-ORPHAN_GROB_PENALTY = 1
-options = None
-
-################################################################
-# system interface.
-temp_dir = None
-class TempDirectory:
- def __init__ (self):
- import tempfile
- self.dir = tempfile.mkdtemp ()
- print 'dir is', self.dir
- def __del__ (self):
- print 'rm -rf %s' % self.dir
- os.system ('rm -rf %s' % self.dir)
- def __call__ (self):
- return self.dir
-
-
-def get_temp_dir ():
- global temp_dir
- if not temp_dir:
- temp_dir = TempDirectory ()
- return temp_dir ()
-
-def read_pipe (c):
- print 'pipe' , c
- return os.popen (c).read ()
-
-def system (c):
- print 'system' , c
- s = os.system (c)
- if s :
- raise Exception ("failed")
- return
-
-def shorten_string (s):
- threshold = 15
- if len (s) > 2*threshold:
- s = s[:threshold] + '..' + s[-threshold:]
- return s
-
-def max_distance (x1, x2):
- dist = 0.0
-
- for (p,q) in zip (x1, x2):
- dist = max (abs (p-q), dist)
-
- return dist
-
-
-def compare_png_images (old, new, dest_dir):
- def png_dims (f):
- m = re.search ('([0-9]+) x ([0-9]+)', read_pipe ('file %s' % f))
-
- return tuple (map (int, m.groups ()))
-
- dest = os.path.join (dest_dir, new.replace ('.png', '.compare.jpeg'))
- try:
- dims1 = png_dims (old)
- dims2 = png_dims (new)
- except AttributeError:
- ## hmmm. what to do?
- system ('touch %(dest)s' % locals ())
- return
-
- dims = (min (dims1[0], dims2[0]),
- min (dims1[1], dims2[1]))
-
- dir = get_temp_dir ()
- system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop1.png' % (dims + (old, dir)))
- system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop2.png' % (dims + (new, dir)))
-
- system ('compare -depth 8 %(dir)s/crop1.png %(dir)s/crop2.png %(dir)s/diff.png' % locals ())
-
- system ("convert -depth 8 %(dir)s/diff.png -blur 0x3 -negate -channel alpha,blue -type TrueColorMatte -fx 'intensity' %(dir)s/matte.png" % locals ())
-
- system ("composite -compose atop -quality 65 %(dir)s/matte.png %(new)s %(dest)s" % locals ())
-
-
-################################################################
-# interval/bbox arithmetic.
-
-empty_interval = (INFTY, -INFTY)
-empty_bbox = (empty_interval, empty_interval)
-
-def interval_is_empty (i):
- return i[0] > i[1]
-
-def interval_length (i):
- return max (i[1]-i[0], 0)
-
-def interval_union (i1, i2):
- return (min (i1[0], i2[0]),
- max (i1[1], i2[1]))
-
-def interval_intersect (i1, i2):
- return (max (i1[0], i2[0]),
- min (i1[1], i2[1]))
-
-def bbox_is_empty (b):
- return (interval_is_empty (b[0])
- or interval_is_empty (b[1]))
-
-def bbox_union (b1, b2):
- return (interval_union (b1[X_AXIS], b2[X_AXIS]),
- interval_union (b2[Y_AXIS], b2[Y_AXIS]))
-
-def bbox_intersection (b1, b2):
- return (interval_intersect (b1[X_AXIS], b2[X_AXIS]),
- interval_intersect (b2[Y_AXIS], b2[Y_AXIS]))
-
-def bbox_area (b):
- return interval_length (b[X_AXIS]) * interval_length (b[Y_AXIS])
-
-def bbox_diameter (b):
- return max (interval_length (b[X_AXIS]),
- interval_length (b[Y_AXIS]))
-
-
-def difference_area (a, b):
- return bbox_area (a) - bbox_area (bbox_intersection (a,b))
-
-class GrobSignature:
- def __init__ (self, exp_list):
- (self.name, self.origin, bbox_x,
- bbox_y, self.output_expression) = tuple (exp_list)
-
- self.bbox = (bbox_x, bbox_y)
- self.centroid = (bbox_x[0] + bbox_x[1], bbox_y[0] + bbox_y[1])
-
- def __repr__ (self):
- return '%s: (%.2f,%.2f), (%.2f,%.2f)\n' % (self.name,
- self.bbox[0][0],
- self.bbox[0][1],
- self.bbox[1][0],
- self.bbox[1][1])
-
- def axis_centroid (self, axis):
- return apply (sum, self.bbox[axis]) / 2
-
- def centroid_distance (self, other, scale):
- return max_distance (self.centroid, other.centroid) / scale
-
- def bbox_distance (self, other):
- divisor = bbox_area (self.bbox) + bbox_area (other.bbox)
-
- if divisor:
- return (difference_area (self.bbox, other.bbox) +
- difference_area (other.bbox, self.bbox)) / divisor
- else:
- return 0.0
-
- def expression_distance (self, other):
- if self.output_expression == other.output_expression:
- return 0
- else:
- return 1
-
-################################################################
-# single System.
-
-class SystemSignature:
- def __init__ (self, grob_sigs):
- d = {}
- for g in grob_sigs:
- val = d.setdefault (g.name, [])
- val += [g]
-
- self.grob_dict = d
- self.set_all_bbox (grob_sigs)
-
- def set_all_bbox (self, grobs):
- self.bbox = empty_bbox
- for g in grobs:
- self.bbox = bbox_union (g.bbox, self.bbox)
-
- def closest (self, grob_name, centroid):
- min_d = INFTY
- min_g = None
- try:
- grobs = self.grob_dict[grob_name]
-
- for g in grobs:
- d = max_distance (g.centroid, centroid)
- if d < min_d:
- min_d = d
- min_g = g
-
-
- return min_g
-
- except KeyError:
- return None
- def grobs (self):
- return reduce (lambda x,y: x+y, self.grob_dict.values(), [])
-
-################################################################
-## comparison of systems.
-
-class SystemLink:
- def __init__ (self, system1, system2):
- self.system1 = system1
- self.system2 = system2
-
- self.link_list_dict = {}
- self.back_link_dict = {}
-
-
- ## pairs
- self.orphans = []
-
- ## pair -> distance
- self.geo_distances = {}
-
- ## pairs
- self.expression_changed = []
-
- self._geometric_distance = None
- self._expression_change_count = None
- self._orphan_count = None
-
- for g in system1.grobs ():
-
- ## skip empty bboxes.
- if bbox_is_empty (g.bbox):
- continue
-
- closest = system2.closest (g.name, g.centroid)
-
- self.link_list_dict.setdefault (closest, [])
- self.link_list_dict[closest].append (g)
- self.back_link_dict[g] = closest
-
-
- def calc_geometric_distance (self):
- total = 0.0
- for (g1,g2) in self.back_link_dict.items ():
- if g2:
- d = g1.bbox_distance (g2)
- if d:
- self.geo_distances[(g1,g2)] = d
-
- total += d
-
- self._geometric_distance = total
-
- def calc_orphan_count (self):
- count = 0
- for (g1, g2) in self.back_link_dict.items ():
- if g2 == None:
- self.orphans.append ((g1, None))
-
- count += 1
-
- self._orphan_count = count
-
- def calc_output_exp_distance (self):
- d = 0
- for (g1,g2) in self.back_link_dict.items ():
- if g2:
- d += g1.expression_distance (g2)
-
- self._expression_change_count = d
-
- def output_expression_details_string (self):
- return ', '.join ([g1.name for g1 in self.expression_changed])
-
- def geo_details_string (self):
- results = [(d, g1,g2) for ((g1, g2), d) in self.geo_distances.items()]
- results.sort ()
- results.reverse ()
-
- return ', '.join (['%s: %f' % (g1.name, d) for (d, g1, g2) in results])
-
- def orphan_details_string (self):
- return ', '.join (['%s-None' % g1.name for (g1,g2) in self.orphans if g2==None])
-
- def geometric_distance (self):
- if self._geometric_distance == None:
- self.calc_geometric_distance ()
- return self._geometric_distance
-
- def orphan_count (self):
- if self._orphan_count == None:
- self.calc_orphan_count ()
-
- return self._orphan_count
-
- def output_expression_change_count (self):
- if self._expression_change_count == None:
- self.calc_output_exp_distance ()
- return self._expression_change_count
-
- def distance (self):
- return (self.output_expression_change_count (),
- self.orphan_count (),
- self.geometric_distance ())
-
-def read_signature_file (name):
- print 'reading', name
-
- entries = open (name).read ().split ('\n')
- def string_to_tup (s):
- return tuple (map (float, s.split (' ')))
-
- def string_to_entry (s):
- fields = s.split('@')
- fields[2] = string_to_tup (fields[2])
- fields[3] = string_to_tup (fields[3])
-
- return tuple (fields)
-
- entries = [string_to_entry (e) for e in entries
- if e and not e.startswith ('#')]
-
- grob_sigs = [GrobSignature (e) for e in entries]
- sig = SystemSignature (grob_sigs)
- return sig
-
-
-################################################################
-# different systems of a .ly file.
-
-hash_to_original_name = {}
-
-class FileLink:
- def __init__ (self, f1, f2):
- self._distance = None
- self.file_names = (f1, f2)
-
- def text_record_string (self):
- return '%-30f %-20s\n' % (self.distance (),
- self.name ()
- + os.path.splitext (self.file_names[1])[1]
- )
-
- def calc_distance (self):
- return 0.0
-
- def distance (self):
- if self._distance == None:
- self._distance = self.calc_distance ()
-
- return self._distance
-
- def source_file (self):
- for ext in ('.ly', '.ly.txt'):
- base = os.path.splitext (self.file_names[1])[0]
- f = base + ext
- if os.path.exists (f):
- return f
-
- return ''
-
- def name (self):
- base = os.path.basename (self.file_names[1])
- base = os.path.splitext (base)[0]
- base = hash_to_original_name.get (base, base)
- base = os.path.splitext (base)[0]
- return base
-
- def extension (self):
- return os.path.splitext (self.file_names[1])[1]
-
- def link_files_for_html (self, dest_dir):
- for f in self.file_names:
- link_file (f, os.path.join (dest_dir, f))
-
- def get_distance_details (self):
- return ''
-
- def get_cell (self, oldnew):
- return ''
-
- def get_file (self, oldnew):
- return self.file_names[oldnew]
-
- def html_record_string (self, dest_dir):
- dist = self.distance()
-
- details = self.get_distance_details ()
- if details:
- details_base = os.path.splitext (self.file_names[1])[0]
- details_base += '.details.html'
- fn = dest_dir + '/' + details_base
- open_write_file (fn).write (details)
-
- details = '<br>(<a href="%(details_base)s">details</a>)' % locals ()
-
- cell1 = self.get_cell (0)
- cell2 = self.get_cell (1)
-
- name = self.name () + self.extension ()
- file1 = self.get_file (0)
- file2 = self.get_file (1)
-
- return '''<tr>
-<td>
-%(dist)f
-%(details)s
-</td>
-<td>%(cell1)s<br><font size=-2><a href="%(file1)s"><tt>%(name)s</tt></font></td>
-<td>%(cell2)s<br><font size=-2><a href="%(file2)s"><tt>%(name)s</tt></font></td>
-</tr>''' % locals ()
-
-
-class FileCompareLink (FileLink):
- def __init__ (self, f1, f2):
- FileLink.__init__ (self, f1, f2)
- self.contents = (self.get_content (self.file_names[0]),
- self.get_content (self.file_names[1]))
-
-
- def calc_distance (self):
- ## todo: could use import MIDI to pinpoint
- ## what & where changed.
-
- if self.contents[0] == self.contents[1]:
- return 0.0
- else:
- return 100.0;
-
- def get_content (self, f):
- print 'reading', f
- s = open (f).read ()
- return s
-
-
-class GitFileCompareLink (FileCompareLink):
- def get_cell (self, oldnew):
- str = self.contents[oldnew]
-
- # truncate long lines
- str = '\n'.join ([l[:80] for l in str.split ('\n')])
-
-
- str = '<font size="-2"><pre>%s</pre></font>' % str
- return str
-
- def calc_distance (self):
- if self.contents[0] == self.contents[1]:
- d = 0.0
- else:
- d = 1.0001 *options.threshold
-
- return d
-
-
-class TextFileCompareLink (FileCompareLink):
- def calc_distance (self):
- import difflib
- diff = difflib.unified_diff (self.contents[0].strip().split ('\n'),
- self.contents[1].strip().split ('\n'),
- fromfiledate = self.file_names[0],
- tofiledate = self.file_names[1]
- )
-
- self.diff_lines = [l for l in diff]
- self.diff_lines = self.diff_lines[2:]
-
- return math.sqrt (float (len ([l for l in self.diff_lines if l[0] in '-+'])))
-
- def get_cell (self, oldnew):
- str = ''
- if oldnew == 1:
- str = '\n'.join ([d.replace ('\n','') for d in self.diff_lines])
- str = '<font size="-2"><pre>%s</pre></font>' % str
- return str
-
-class LogFileCompareLink (TextFileCompareLink):
- def get_content (self, f):
- c = TextFileCompareLink.get_content (self, f)
- c = re.sub ("\nProcessing `[^\n]+'\n", '', c)
- return c
-
-class ProfileFileLink (FileCompareLink):
- def __init__ (self, f1, f2):
- FileCompareLink.__init__ (self, f1, f2)
- self.results = [{}, {}]
-
- def get_cell (self, oldnew):
- str = ''
- for k in ('time', 'cells'):
- if oldnew==0:
- str += '%-8s: %d\n' % (k, int (self.results[oldnew][k]))
- else:
- str += '%-8s: %8d (%5.3f)\n' % (k, int (self.results[oldnew][k]),
- self.get_ratio (k))
-
- return '<pre>%s</pre>' % str
-
- def get_ratio (self, key):
- (v1,v2) = (self.results[0].get (key, -1),
- self.results[1].get (key, -1))
-
- if v1 <= 0 or v2 <= 0:
- return 0.0
-
- return (v1 - v2) / float (v1+v2)
-
- def calc_distance (self):
- for oldnew in (0,1):
- def note_info (m):
- self.results[oldnew][m.group(1)] = float (m.group (2))
-
- re.sub ('([a-z]+): ([-0-9.]+)\n',
- note_info, self.contents[oldnew])
-
- dist = 0.0
- factor = {
- 'time': 0.1,
- 'cells': 5.0,
- }
-
- for k in ('time', 'cells'):
- real_val = math.tan (self.get_ratio (k) * 0.5 * math.pi)
- dist += math.exp (math.fabs (real_val) * factor[k]) - 1
-
- dist = min (dist, 100)
- return dist
-
-
-class MidiFileLink (TextFileCompareLink):
- def get_content (self, oldnew):
- import midi
-
- data = FileCompareLink.get_content (self, oldnew)
- midi = midi.parse (data)
- tracks = midi[1]
-
- str = ''
- j = 0
- for t in tracks:
- str += 'track %d' % j
- j += 1
-
- for e in t:
- ev_str = repr (e)
- if re.search ('LilyPond [0-9.]+', ev_str):
- continue
-
- str += ' ev %s\n' % `e`
- return str
-
-
-
-class SignatureFileLink (FileLink):
- def __init__ (self, f1, f2 ):
- FileLink.__init__ (self, f1, f2)
- self.system_links = {}
-
- def add_system_link (self, link, number):
- self.system_links[number] = link
-
- def calc_distance (self):
- d = 0.0
-
- orphan_distance = 0.0
- for l in self.system_links.values ():
- d = max (d, l.geometric_distance ())
- orphan_distance += l.orphan_count ()
-
- return d + orphan_distance
-
- def add_file_compare (self, f1, f2):
- system_index = []
-
- def note_system_index (m):
- system_index.append (int (m.group (1)))
- return ''
-
- base1 = re.sub ("-([0-9]+).signature", note_system_index, f1)
- base2 = re.sub ("-([0-9]+).signature", note_system_index, f2)
-
- self.base_names = (os.path.normpath (base1),
- os.path.normpath (base2))
-
- s1 = read_signature_file (f1)
- s2 = read_signature_file (f2)
-
- link = SystemLink (s1, s2)
-
- self.add_system_link (link, system_index[0])
-
-
- def create_images (self, dest_dir):
-
- files_created = [[], []]
- for oldnew in (0, 1):
- pat = self.base_names[oldnew] + '.eps'
-
- for f in glob.glob (pat):
- infile = f
- outfile = (dest_dir + '/' + f).replace ('.eps', '.png')
- data_option = ''
- if options.local_data_dir:
- data_option = ('-slilypond-datadir=%s/../share/lilypond/current '
- % os.path.dirname(infile))
-
- mkdir (os.path.split (outfile)[0])
- cmd = ('gs -sDEVICE=png16m -dGraphicsAlphaBits=4 -dTextAlphaBits=4 '
- ' %(data_option)s '
- ' -r101 '
- ' -sOutputFile=%(outfile)s -dNOSAFER -dEPSCrop -q -dNOPAUSE '
- ' %(infile)s -c quit ') % locals ()
-
- files_created[oldnew].append (outfile)
- system (cmd)
-
- return files_created
-
- def link_files_for_html (self, dest_dir):
- FileLink.link_files_for_html (self, dest_dir)
- to_compare = [[], []]
-
- exts = []
- if options.create_images:
- to_compare = self.create_images (dest_dir)
- else:
- exts += ['.png', '-page*png']
-
- for ext in exts:
- for oldnew in (0,1):
- for f in glob.glob (self.base_names[oldnew] + ext):
- dst = dest_dir + '/' + f
- link_file (f, dst)
-
- if f.endswith ('.png'):
- to_compare[oldnew].append (f)
-
- if options.compare_images:
- for (old, new) in zip (to_compare[0], to_compare[1]):
- compare_png_images (old, new, dest_dir)
-
-
- def get_cell (self, oldnew):
- def img_cell (ly, img, name):
- if not name:
- name = 'source'
- else:
- name = '<tt>%s</tt>' % name
-
- return '''
-<a href="%(img)s">
-<img src="%(img)s" style="border-style: none; max-width: 500px;">
-</a><br>
-''' % locals ()
- def multi_img_cell (ly, imgs, name):
- if not name:
- name = 'source'
- else:
- name = '<tt>%s</tt>' % name
-
- imgs_str = '\n'.join (['''<a href="%s">
-<img src="%s" style="border-style: none; max-width: 500px;">
-</a><br>''' % (img, img)
- for img in imgs])
-
-
- return '''
-%(imgs_str)s
-''' % locals ()
-
-
-
- def cell (base, name):
- pat = base + '-page*.png'
- pages = glob.glob (pat)
-
- if pages:
- return multi_img_cell (base + '.ly', sorted (pages), name)
- else:
- return img_cell (base + '.ly', base + '.png', name)
-
-
-
- str = cell (os.path.splitext (self.file_names[oldnew])[0], self.name ())
- if options.compare_images and oldnew == 1:
- str = str.replace ('.png', '.compare.jpeg')
-
- return str
-
-
- def get_distance_details (self):
- systems = self.system_links.items ()
- systems.sort ()
-
- html = ""
- for (c, link) in systems:
- e = '<td>%d</td>' % c
- for d in link.distance ():
- e += '<td>%f</td>' % d
-
- e = '<tr>%s</tr>' % e
-
- html += e
-
- e = '<td>%d</td>' % c
- for s in (link.output_expression_details_string (),
- link.orphan_details_string (),
- link.geo_details_string ()):
- e += "<td>%s</td>" % s
-
-
- e = '<tr>%s</tr>' % e
- html += e
-
- original = self.name ()
- html = '''<html>
-<head>
-<title>comparison details for %(original)s</title>
-</head>
-<body>
-<table border=1>
-<tr>
-<th>system</th>
-<th>output</th>
-<th>orphan</th>
-<th>geo</th>
-</tr>
-
-%(html)s
-</table>
-
-</body>
-</html>
-''' % locals ()
- return html
-
-
-################################################################
-# Files/directories
-
-import glob
-import re
-
-def compare_signature_files (f1, f2):
- s1 = read_signature_file (f1)
- s2 = read_signature_file (f2)
-
- return SystemLink (s1, s2).distance ()
-
-def paired_files (dir1, dir2, pattern):
- """
- Search DIR1 and DIR2 for PATTERN.
-
- Return (PAIRED, MISSING-FROM-2, MISSING-FROM-1)
-
- """
-
- files = []
- for d in (dir1,dir2):
- found = [os.path.split (f)[1] for f in glob.glob (d + '/' + pattern)]
- found = dict ((f, 1) for f in found)
- files.append (found)
-
- pairs = []
- missing = []
- for f in files[0]:
- try:
- files[1].pop (f)
- pairs.append (f)
- except KeyError:
- missing.append (f)
-
- return (pairs, files[1].keys (), missing)
-
-class ComparisonData:
- def __init__ (self):
- self.result_dict = {}
- self.missing = []
- self.added = []
- self.file_links = {}
-
- def read_sources (self):
-
- ## ugh: drop the .ly.txt
- for (key, val) in self.file_links.items ():
-
- def note_original (match, ln=val):
- key = ln.name ()
- hash_to_original_name[key] = match.group (1)
- return ''
-
- sf = val.source_file ()
- if sf:
- re.sub (r'\\sourcefilename "([^"]+)"',
- note_original, open (sf).read ())
- else:
- print 'no source for', val
-
- def compare_trees (self, dir1, dir2):
- self.compare_directories (dir1, dir2)
-
- (root, dirs, files) = os.walk (dir1).next ()
- for d in dirs:
- d1 = os.path.join (dir1, d)
- d2 = os.path.join (dir2, d)
-
- if os.path.islink (d1) or os.path.islink (d2):
- continue
-
- if os.path.isdir (d2):
- self.compare_trees (d1, d2)
-
- def compare_directories (self, dir1, dir2):
- for ext in ['signature',
- 'midi',
- 'log',
- 'profile',
- 'gittxt']:
- (paired, m1, m2) = paired_files (dir1, dir2, '*.' + ext)
-
- self.missing += [(dir1, m) for m in m1]
- self.added += [(dir2, m) for m in m2]
-
- for p in paired:
- if (options.max_count
- and len (self.file_links) > options.max_count):
- continue
-
- f2 = dir2 + '/' + p
- f1 = dir1 + '/' + p
- self.compare_files (f1, f2)
-
- def compare_files (self, f1, f2):
- if f1.endswith ('signature'):
- self.compare_signature_files (f1, f2)
- else:
- ext = os.path.splitext (f1)[1]
- klasses = {
- '.midi': MidiFileLink,
- '.log' : LogFileCompareLink,
- '.profile': ProfileFileLink,
- '.gittxt': GitFileCompareLink,
- }
-
- if klasses.has_key (ext):
- self.compare_general_files (klasses[ext], f1, f2)
-
- def compare_general_files (self, klass, f1, f2):
- name = os.path.split (f1)[1]
-
- file_link = klass (f1, f2)
- self.file_links[name] = file_link
-
- def compare_signature_files (self, f1, f2):
- name = os.path.split (f1)[1]
- name = re.sub ('-[0-9]+.signature', '', name)
-
- file_link = None
- try:
- file_link = self.file_links[name]
- except KeyError:
- generic_f1 = re.sub ('-[0-9]+.signature', '.ly', f1)
- generic_f2 = re.sub ('-[0-9]+.signature', '.ly', f2)
- file_link = SignatureFileLink (generic_f1, generic_f2)
- self.file_links[name] = file_link
-
- file_link.add_file_compare (f1, f2)
-
- def write_changed (self, dest_dir, threshold):
- (changed, below, unchanged) = self.thresholded_results (threshold)
-
- str = '\n'.join ([os.path.splitext (link.file_names[1])[0]
- for link in changed])
- fn = dest_dir + '/changed.txt'
-
- open_write_file (fn).write (str)
-
- def thresholded_results (self, threshold):
- ## todo: support more scores.
- results = [(link.distance(), link)
- for link in self.file_links.values ()]
- results.sort ()
- results.reverse ()
-
- unchanged = [r for (d,r) in results if d == 0.0]
- below = [r for (d,r) in results if threshold >= d > 0.0]
- changed = [r for (d,r) in results if d > threshold]
-
- return (changed, below, unchanged)
-
- def write_text_result_page (self, filename, threshold):
- out = None
- if filename == '':
- out = sys.stdout
- else:
- print 'writing "%s"' % filename
- out = open_write_file (filename)
-
- (changed, below, unchanged) = self.thresholded_results (threshold)
-
-
- for link in changed:
- out.write (link.text_record_string ())
-
- out.write ('\n\n')
- out.write ('%d below threshold\n' % len (below))
- out.write ('%d unchanged\n' % len (unchanged))
-
- def create_text_result_page (self, dir1, dir2, dest_dir, threshold):
- self.write_text_result_page (dest_dir + '/index.txt', threshold)
-
- def create_html_result_page (self, dir1, dir2, dest_dir, threshold):
- dir1 = dir1.replace ('//', '/')
- dir2 = dir2.replace ('//', '/')
-
- (changed, below, unchanged) = self.thresholded_results (threshold)
-
-
- html = ''
- old_prefix = os.path.split (dir1)[1]
- for link in changed:
- html += link.html_record_string (dest_dir)
-
-
- short_dir1 = shorten_string (dir1)
- short_dir2 = shorten_string (dir2)
- html = '''<html>
-<table rules="rows" border bordercolor="blue">
-<tr>
-<th>distance</th>
-<th>%(short_dir1)s</th>
-<th>%(short_dir2)s</th>
-</tr>
-%(html)s
-</table>
-</html>''' % locals()
-
- html += ('<p>')
- below_count = len (below)
-
- if below_count:
- html += ('<p>%d below threshold</p>' % below_count)
-
- html += ('<p>%d unchanged</p>' % len (unchanged))
-
- dest_file = dest_dir + '/index.html'
- open_write_file (dest_file).write (html)
-
-
- for link in changed:
- link.link_files_for_html (dest_dir)
-
-
- def print_results (self, threshold):
- self.write_text_result_page ('', threshold)
-
-def compare_trees (dir1, dir2, dest_dir, threshold):
- data = ComparisonData ()
- data.compare_trees (dir1, dir2)
- data.read_sources ()
-
-
- data.print_results (threshold)
-
- if os.path.isdir (dest_dir):
- system ('rm -rf %s '% dest_dir)
-
- data.write_changed (dest_dir, threshold)
- data.create_html_result_page (dir1, dir2, dest_dir, threshold)
- data.create_text_result_page (dir1, dir2, dest_dir, threshold)
-
-################################################################
-# TESTING
-
-def mkdir (x):
- if not os.path.isdir (x):
- print 'mkdir', x
- os.makedirs (x)
-
-def link_file (x, y):
- mkdir (os.path.split (y)[0])
- try:
- print x, '->', y
- os.link (x, y)
- except OSError, z:
- print 'OSError', x, y, z
- raise OSError
-
-def open_write_file (x):
- d = os.path.split (x)[0]
- mkdir (d)
- return open (x, 'w')
-
-
-def system (x):
-
- print 'invoking', x
- stat = os.system (x)
- assert stat == 0
-
-
-def test_paired_files ():
- print paired_files (os.environ["HOME"] + "/src/lilypond/scripts/",
- os.environ["HOME"] + "/src/lilypond-stable/buildscripts/", '*.py')
-
-
-def test_compare_trees ():
- system ('rm -rf dir1 dir2')
- system ('mkdir dir1 dir2')
- system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir1')
- system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir2')
- system ('cp 20expr{-*.signature,.ly,.png,.eps,.log,.profile} dir1')
- system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir2/')
- system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir1/')
- system ('cp 19-1.signature 19.sub-1.signature')
- system ('cp 19.ly 19.sub.ly')
- system ('cp 19.profile 19.sub.profile')
- system ('cp 19.log 19.sub.log')
- system ('cp 19.png 19.sub.png')
- system ('cp 19.eps 19.sub.eps')
-
- system ('cp 20multipage* dir1')
- system ('cp 20multipage* dir2')
- system ('cp 19multipage-1.signature dir2/20multipage-1.signature')
-
-
- system ('mkdir -p dir1/subdir/ dir2/subdir/')
- system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir1/subdir/')
- system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir2/subdir/')
- system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir2/')
- system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir1/')
- system ('echo HEAD is 1 > dir1/tree.gittxt')
- system ('echo HEAD is 2 > dir2/tree.gittxt')
-
- ## introduce differences
- system ('cp 19-1.signature dir2/20-1.signature')
- system ('cp 19.profile dir2/20.profile')
- system ('cp 19.png dir2/20.png')
- system ('cp 19multipage-page1.png dir2/20multipage-page1.png')
- system ('cp 20-1.signature dir2/subdir/19.sub-1.signature')
- system ('cp 20.png dir2/subdir/19.sub.png')
- system ("sed 's/: /: 1/g' 20.profile > dir2/subdir/19.sub.profile")
-
- ## radical diffs.
- system ('cp 19-1.signature dir2/20grob-1.signature')
- system ('cp 19-1.signature dir2/20grob-2.signature')
- system ('cp 19multipage.midi dir1/midi-differ.midi')
- system ('cp 20multipage.midi dir2/midi-differ.midi')
- system ('cp 19multipage.log dir1/log-differ.log')
- system ('cp 19multipage.log dir2/log-differ.log && echo different >> dir2/log-differ.log && echo different >> dir2/log-differ.log')
-
- compare_trees ('dir1', 'dir2', 'compare-dir1dir2', options.threshold)
-
-
-def test_basic_compare ():
- ly_template = r"""
-
-\version "2.10.0"
-#(define default-toplevel-book-handler
- print-book-with-defaults-as-systems )
-
-#(ly:set-option (quote no-point-and-click))
-
-\sourcefilename "my-source.ly"
-
-%(papermod)s
-\header { tagline = ##f }
-\score {
-<<
-\new Staff \relative c {
- c4^"%(userstring)s" %(extragrob)s
- }
-\new Staff \relative c {
- c4^"%(userstring)s" %(extragrob)s
- }
->>
-\layout{}
-}
-
-"""
-
- dicts = [{ 'papermod' : '',
- 'name' : '20',
- 'extragrob': '',
- 'userstring': 'test' },
- { 'papermod' : '#(set-global-staff-size 19.5)',
- 'name' : '19',
- 'extragrob': '',
- 'userstring': 'test' },
- { 'papermod' : '',
- 'name' : '20expr',
- 'extragrob': '',
- 'userstring': 'blabla' },
- { 'papermod' : '',
- 'name' : '20grob',
- 'extragrob': 'r2. \\break c1',
- 'userstring': 'test' },
- ]
-
- for d in dicts:
- open (d['name'] + '.ly','w').write (ly_template % d)
-
- names = [d['name'] for d in dicts]
-
- system ('lilypond -ddump-profile -dseparate-log-files -ddump-signatures --png -dbackend=eps ' + ' '.join (names))
-
-
- multipage_str = r'''
- #(set-default-paper-size "a6")
- \score {
- \relative {c1 \pageBreak c1 }
- \layout {}
- \midi {}
- }
- '''
-
- open ('20multipage.ly', 'w').write (multipage_str.replace ('c1', 'd1'))
- open ('19multipage.ly', 'w').write ('#(set-global-staff-size 19.5)\n' + multipage_str)
- system ('lilypond -dseparate-log-files -ddump-signatures --png 19multipage 20multipage ')
-
- test_compare_signatures (names)
-
-def test_compare_signatures (names, timing=False):
- import time
-
- times = 1
- if timing:
- times = 100
-
- t0 = time.clock ()
-
- count = 0
- for t in range (0, times):
- sigs = dict ((n, read_signature_file ('%s-1.signature' % n)) for n in names)
- count += 1
-
- if timing:
- print 'elapsed', (time.clock() - t0)/count
-
-
- t0 = time.clock ()
- count = 0
- combinations = {}
- for (n1, s1) in sigs.items():
- for (n2, s2) in sigs.items():
- combinations['%s-%s' % (n1, n2)] = SystemLink (s1,s2).distance ()
- count += 1
-
- if timing:
- print 'elapsed', (time.clock() - t0)/count
-
- results = combinations.items ()
- results.sort ()
- for k,v in results:
- print '%-20s' % k, v
-
- assert combinations['20-20'] == (0.0,0.0,0.0)
- assert combinations['20-20expr'][0] > 0.0
- assert combinations['20-19'][2] < 10.0
- assert combinations['20-19'][2] > 0.0
-
-
-def run_tests ():
- dir = 'test-output-distance'
-
- do_clean = not os.path.exists (dir)
-
- print 'test results in ', dir
- if do_clean:
- system ('rm -rf ' + dir)
- system ('mkdir ' + dir)
-
- os.chdir (dir)
- if do_clean:
- test_basic_compare ()
-
- test_compare_trees ()
-
-################################################################
-#
-
-def main ():
- p = optparse.OptionParser ("output-distance - compare LilyPond formatting runs")
- p.usage = 'output-distance.py [options] tree1 tree2'
-
- p.add_option ('', '--test-self',
- dest="run_test",
- action="store_true",
- help='run test method')
-
- p.add_option ('--max-count',
- dest="max_count",
- metavar="COUNT",
- type="int",
- default=0,
- action="store",
- help='only analyze COUNT signature pairs')
-
- p.add_option ('', '--threshold',
- dest="threshold",
- default=0.3,
- action="store",
- type="float",
- help='threshold for geometric distance')
-
- p.add_option ('--no-compare-images',
- dest="compare_images",
- default=True,
- action="store_false",
- help="Don't run graphical comparisons")
-
- p.add_option ('--create-images',
- dest="create_images",
- default=False,
- action="store_true",
- help="Create PNGs from EPSes")
-
-
- p.add_option ('--local-datadir',
- dest="local_data_dir",
- default=False,
- action="store_true",
- help='whether to use the share/lilypond/ directory in the test directory')
-
- p.add_option ('-o', '--output-dir',
- dest="output_dir",
- default=None,
- action="store",
- type="string",
- help='where to put the test results [tree2/compare-tree1tree2]')
-
- global options
- (options, args) = p.parse_args ()
-
- if options.run_test:
- run_tests ()
- sys.exit (0)
-
- if len (args) != 2:
- p.print_usage()
- sys.exit (2)
-
- name = options.output_dir
- if not name:
- name = args[0].replace ('/', '')
- name = os.path.join (args[1], 'compare-' + shorten_string (name))
-
- compare_trees (args[0], args[1], name, options.threshold)
-
-if __name__ == '__main__':
- main()
-
+++ /dev/null
-#!@FONTFORGE@
-
-Open($1);
-MergeKern($2)
-
-
-# The AFM files of `New Century Schoolbook' family as distributed within the
-# urw-fonts-1.0.7pre41.tar.bz2 archive contain a bunch of ligatures which
-# shouldn't be active by default:
-#
-# T + M -> trademark
-# N + o -> afii61352
-# i + j -> ij
-# I + J -> IJ
-#
-# This font bundle is shipped by Fedora Core 6 and other GNU/Linux
-# distributions; we simply remove those ligatures.
-
-SelectIf("trademark", "trademark", \
- "afii61352", "afii61352", \
- "ij", "ij", \
- "IJ", "IJ");
-if (Strtol($version) < 20070501)
- RemoveATT("Ligature", "*", "*");
-else
- RemovePosSub("*");
-endif
-
-Generate($3 + $fontname + ".otf");
-
-# EOF
+++ /dev/null
-#!@PYTHON@
-
-"""
-Postprocess HTML files:
-add footer, tweak links, add language selection menu.
-"""
-import re
-import os
-import time
-import operator
-
-import langdefs
-
-# This is to try to make the docball not too big with almost duplicate files
-# see process_links()
-non_copied_pages = ['Documentation/user/out-www/lilypond-big-page',
- 'Documentation/user/out-www/lilypond-internals-big-page',
- 'Documentation/user/out-www/lilypond-learning-big-page',
- 'Documentation/user/out-www/lilypond-program-big-page',
- 'Documentation/user/out-www/music-glossary-big-page',
- 'out-www/examples',
- 'Documentation/topdocs',
- 'Documentation/bibliography',
- 'Documentation/out-www/THANKS',
- 'Documentation/out-www/DEDICATION',
- 'Documentation/out-www/devel',
- 'input/']
-
-def _doc (s):
- return s
-
-header = r"""
-"""
-
-footer = '''
-<div class="footer">
-<p class="footer_version">
-%(footer_name_version)s
-</p>
-<p class="footer_report">
-%(footer_report_links)s
-</p>
-</div>
-'''
-footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).')
-# ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process
-footer_report_links = _doc ('Your <a href="%(suggest_Docs_url)s">suggestions for the documentation</a> are welcome, please report errors to our <a href="%(mail_address_url)s">bug list</a>.')
-
-
-mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs'
-suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding'
-
-header_tag = '<!-- header_tag -->'
-header_tag_re = re.compile (header_tag)
-
-footer_tag = '<!-- footer_tag -->'
-footer_tag_re = re.compile (footer_tag)
-
-lang_available = _doc ("Other languages: %s.")
-browser_lang = _doc ('About <A HREF="%s">automatic language selection</A>.')
-browser_language_url = "/web/about/browser-language"
-
-LANGUAGES_TEMPLATE = '''
-<p id="languages">
- %(language_available)s
- <br/>
- %(browser_language)s
-</p>
-'''
-
-
-html_re = re.compile ('(.*?)(?:[.]([^/.]*))?[.]html$')
-pages_dict = {}
-
-def build_pages_dict (filelist):
- """Build dictionary of available translations of each page"""
- global pages_dict
- for f in filelist:
- m = html_re.match (f)
- if m:
- g = m.groups()
- if len (g) <= 1 or g[1] == None:
- e = ''
- else:
- e = g[1]
- if not g[0] in pages_dict:
- pages_dict[g[0]] = [e]
- else:
- pages_dict[g[0]].append (e)
-
-def source_links_replace (m, source_val):
- return 'href="' + os.path.join (source_val, m.group (1)) + '"'
-
-splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|\
-Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|\
-lilypond-learning))/')
-
-snippets_ref_re = re.compile (r'href="(\.\./)?lilypond-snippets')
-user_ref_re = re.compile ('href="(?:\.\./)?lilypond\
-(-internals|-learning|-program|(?!-snippets))')
-
-docindex_link_re = re.compile (r'href="index.html"')
-
-
-## Windows does not support symlinks.
-# This function avoids creating symlinks for splitted HTML manuals
-# Get rid of symlinks in GNUmakefile.in (local-WWW-post)
-# this also fixes missing PNGs only present in translated docs
-def hack_urls (s, prefix):
- if splitted_docs_re.match (prefix):
- s = re.sub ('(href|src)="(../lily-.*?|.*?[.]png)"', '\\1="../\\2"', s)
-
- # fix xrefs between documents in different directories ad hoc
- if 'user/out-www/lilypond' in prefix:
- s = snippets_ref_re.sub ('href="source/input/lsr/lilypond-snippets', s)
- elif 'input/lsr' in prefix:
- s = user_ref_re.sub ('href="source/Documentation/user/lilypond\\1', s)
-
- # we also need to replace in the lsr, which is already processed above!
- if 'input/' in prefix or 'Documentation/topdocs' in prefix:
- # fix the link from the regtest, lsr and topdoc pages to the doc index
- # (rewrite prefix to obtain the relative path of the doc index page)
- rel_link = re.sub (r'out-www/.*$', '', prefix)
- rel_link = re.sub (r'[^/]*/', '../', rel_link)
- if 'input/regression' in prefix:
- indexfile = "Documentation/devel"
- else:
- indexfile = "index"
- s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s)
-
- source_path = os.path.join (os.path.dirname (prefix), 'source')
- if not os.path.islink (source_path):
- return s
- source_val = os.readlink (source_path)
- return re.sub ('href="source/(.*?)"', lambda m: source_links_replace (m, source_val), s)
-
-body_tag_re = re.compile ('(?i)<body([^>]*)>')
-html_tag_re = re.compile ('(?i)<html>')
-doctype_re = re.compile ('(?i)<!DOCTYPE')
-doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
-css_re = re.compile ('(?i)<link rel="stylesheet" type="text/css" ([^>]*)href="[^">]*?lilypond.*\.css"([^>]*)>')
-end_head_tag_re = re.compile ('(?i)</head>')
-css_link = """ <link rel="stylesheet" type="text/css" title="Patrick McCarty's design" href="%(rel)sDocumentation/lilypond-mccarty.css">
- <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond.css" title="Andrew Hawryluk's design">
- <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond-blue.css" title="Kurt Kroon's blue design">
- <!--[if lte IE 7]>
- <link href="%(rel)sDocumentation/lilypond-ie-fixes.css" rel="stylesheet" type="text/css">
- <![endif]-->
-"""
-
-
-def add_header (s, prefix):
- """Add header (<body>, doctype and CSS)"""
- if header_tag_re.search (s) == None:
- body = '<body\\1>'
- (s, n) = body_tag_re.subn (body + header, s, 1)
- if not n:
- (s, n) = html_tag_re.subn ('<html>' + header, s, 1)
- if not n:
- s = header + s
-
- s = header_tag + '\n' + s
-
- if doctype_re.search (s) == None:
- s = doctype + s
-
- if css_re.search (s) == None:
- depth = (prefix.count ('/') - 1) * '../'
- s = end_head_tag_re.sub ((css_link % {'rel': depth}) + '</head>', s)
- return s
-
-title_tag_re = re.compile ('.*?<title>(.*?)</title>', re.DOTALL)
-AT_web_title_re = re.compile ('@WEB-TITLE@')
-
-def add_title (s):
- # urg
- # maybe find first node?
- fallback_web_title = '-- --'
- m = title_tag_re.match (s)
- if m:
- fallback_web_title = m.group (1)
- s = AT_web_title_re.sub (fallback_web_title, s)
- return s
-
-footer_insert_re = re.compile ('<!--\s*FOOTER\s*-->')
-end_body_re = re.compile ('(?i)</body>')
-end_html_re = re.compile ('(?i)</html>')
-
-def add_footer (s, footer_text):
- """add footer"""
- (s, n) = footer_insert_re.subn (footer_text + '\n' + '<!-- FOOTER -->', s, 1)
- if not n:
- (s, n) = end_body_re.subn (footer_text + '\n' + '</body>', s, 1)
- if not n:
- (s, n) = end_html_re.subn (footer_text + '\n' + '</html>', s, 1)
- if not n:
- s += footer_text + '\n'
- return s
-
-def find_translations (prefix, lang_ext):
- """find available translations of a page"""
- available = []
- missing = []
- for l in langdefs.LANGUAGES:
- e = l.webext
- if lang_ext != e:
- if e in pages_dict[prefix]:
- available.append (l)
- elif lang_ext == '' and l.enabled and reduce (operator.and_,
- [not prefix.startswith (s)
- for s in non_copied_pages]):
- # English version of missing translated pages will be written
- missing.append (e)
- return available, missing
-
-online_links_re = re.compile ('''(href|src)=['"]\
-((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\
-([.]html)(#[^"']*|)['"]''')
-offline_links_re = re.compile ('href=[\'"]\
-((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
-big_page_name_re = re.compile ('''(.+?)-big-page''')
-
-def process_i18n_big_page_links (match, prefix, lang_ext):
- big_page_name = big_page_name_re.match (match.group (1))
- if big_page_name:
- destination_path = os.path.normpath (os.path.join (os.path.dirname (prefix),
- big_page_name.group (0)))
- if not lang_ext in pages_dict[destination_path]:
- return match.group (0)
- return 'href="' + match.group (1) + '.' + lang_ext \
- + match.group (2) + match.group (3) + '"'
-
-def process_links (s, prefix, lang_ext, file_name, missing, target):
- page_flavors = {}
- if target == 'online':
- # Strip .html, suffix for auto language selection (content
- # negotiation). The menu must keep the full extension, so do
- # this before adding the menu.
- page_flavors[file_name] = \
- [lang_ext, online_links_re.sub ('\\1="\\2\\4"', s)]
- elif target == 'offline':
- # in LANG doc index: don't rewrite .html suffixes
- # as not all .LANG.html pages exist;
- # the doc index should be translated and contain links with the right suffixes
- if prefix == 'Documentation/out-www/index':
- page_flavors[file_name] = [lang_ext, s]
- elif lang_ext == '':
- page_flavors[file_name] = [lang_ext, s]
- for e in missing:
- page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = \
- [e, offline_links_re.sub ('href="\\1.' + e + '\\2\\3"', s)]
- else:
- # For saving bandwidth and disk space, we don't duplicate big pages
- # in English, so we must process translated big pages links differently.
- if 'big-page' in prefix:
- page_flavors[file_name] = \
- [lang_ext,
- offline_links_re.sub \
- (lambda match: process_i18n_big_page_links (match, prefix, lang_ext),
- s)]
- else:
- page_flavors[file_name] = \
- [lang_ext,
- offline_links_re.sub ('href="\\1.' + lang_ext + '\\2\\3"', s)]
- return page_flavors
-
-def add_menu (page_flavors, prefix, available, target, translation):
- for k in page_flavors:
- language_menu = ''
- languages = ''
- if page_flavors[k][0] != '':
- t = translation[page_flavors[k][0]]
- else:
- t = _doc
- for lang in available:
- lang_file = lang.file_name (os.path.basename (prefix), '.html')
- if language_menu != '':
- language_menu += ', '
- language_menu += '<a href="%s">%s</a>' % (lang_file, t (lang.name))
- if target == 'offline':
- browser_language = ''
- elif target == 'online':
- browser_language = t (browser_lang) % browser_language_url
- if language_menu:
- language_available = t (lang_available) % language_menu
- languages = LANGUAGES_TEMPLATE % vars ()
- page_flavors[k][1] = add_footer (page_flavors[k][1], languages)
- return page_flavors
-
-
-def process_html_files (package_name = '',
- package_version = '',
- target = 'offline',
- name_filter = lambda s: s):
- """Add header, footer and tweak links to a number of HTML files
-
- Arguments:
- package_name=NAME set package_name to NAME
- package_version=VERSION set package version to VERSION
- targets=offline|online set page processing depending on the target
- offline is for reading HTML pages locally
- online is for hosting the HTML pages on a website with content
- negotiation
- name_filter a HTML file name filter
- """
- translation = langdefs.translation
- localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
-
- if "http://" in mail_address:
- mail_address_url = mail_address
- else:
- mail_address_url= 'mailto:' + mail_address
-
- versiontup = package_version.split ('.')
- branch_str = _doc ('stable-branch')
- if int (versiontup[1]) % 2:
- branch_str = _doc ('development-branch')
-
- # Initialize dictionaries for string formatting
- subst = {}
- subst[''] = dict ([i for i in globals ().items() if type (i[1]) is str])
- subst[''].update (dict ([i for i in locals ().items() if type (i[1]) is str]))
- for l in translation:
- e = langdefs.LANGDICT[l].webext
- if e:
- subst[e] = {}
- for name in subst['']:
- subst[e][name] = translation[l] (subst[''][name])
- # Do deeper string formatting as early as possible,
- # so only one '%' formatting pass is needed later
- for e in subst:
- subst[e]['footer_name_version'] = subst[e]['footer_name_version'] % subst[e]
- subst[e]['footer_report_links'] = subst[e]['footer_report_links'] % subst[e]
-
- for prefix, ext_list in pages_dict.items ():
- for lang_ext in ext_list:
- file_name = langdefs.lang_file_name (prefix, lang_ext, '.html')
- in_f = open (file_name)
- s = in_f.read()
- in_f.close()
-
- s = s.replace ('%', '%%')
- s = hack_urls (s, prefix)
- s = add_header (s, prefix)
-
- ### add footer
- if footer_tag_re.search (s) == None:
- s = add_footer (s, footer_tag + footer)
-
- available, missing = find_translations (prefix, lang_ext)
- page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target)
- # Add menu after stripping: must not have autoselection for language menu.
- page_flavors = add_menu (page_flavors, prefix, available, target, translation)
- for k in page_flavors:
- page_flavors[k][1] = page_flavors[k][1] % subst[page_flavors[k][0]]
- out_f = open (name_filter (k), 'w')
- out_f.write (page_flavors[k][1])
- out_f.close()
- # if the page is translated, a .en.html symlink is necessary for content negotiation
- if target == 'online' and ext_list != ['']:
- os.symlink (os.path.basename (prefix) + '.html', name_filter (prefix + '.en.html'))
+++ /dev/null
-#! @PYTHON@
-
-import os
-import re
-import sys
-
-frm = re.compile (sys.argv[1], re.MULTILINE)
-to = sys.argv[2]
-
-if not sys.argv[3:] or sys.argv[3] == '-':
- sys.stdout.write (re.sub (frm, to, sys.stdin.read ()))
-for file in sys.argv[3:]:
- s = open (file).read ()
- name = os.path.basename (file)
- base, ext = os.path.splitext (name)
- t = re.sub (frm, to % locals (), s)
- if s != t:
- if 1:
- os.system ('mv %(file)s %(file)s~~' % locals ())
- h = open (file, "w")
- h.write (t)
- h.close ()
- else:
- sys.stdout.write (t)
+++ /dev/null
-#!/usr/bin/env python
-import os
-import sys
-
-for i in sys.argv[1:]:
- print os.path.realpath (i)
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# tely-gettext.py
-
-# Temporary script that helps translated docs sources conversion
-# for texi2html processing
-
-# USAGE: tely-gettext.py BUILDSCRIPT-DIR LOCALEDIR LANG FILES
-
-print "tely_gettext.py"
-
-import sys
-import re
-import os
-import gettext
-
-if len (sys.argv) > 3:
- buildscript_dir, localedir, lang = sys.argv[1:4]
-else:
- print """USAGE: tely-gettext.py BUILDSCRIPT-DIR LOCALEDIR LANG FILES
- For example buildscripts/tely-gettext.py buildscripts Documentation/po/out-www de Documentation/de/user/*.tely"""
- sys.exit (1)
-
-sys.path.append (buildscript_dir)
-import langdefs
-
-double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
-t = gettext.translation('lilypond-doc', localedir, [lang])
-_doc = t.gettext
-
-include_re = re.compile (r'@include (.*?)$', re.M)
-whitespaces = re.compile (r'\s+')
-ref_re = re.compile (r'(?ms)@(ruser|rprogram|ref|rlearning)\{(.*?)\}')
-node_section_re = re.compile (r'@node (.*?)\n@((?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) (.*?)\n')
-menu_entry_re = re.compile (r'\* (.*?)::')
-
-def ref_gettext (m):
- r = whitespaces.sub (' ', m.group (2))
- return '@' + m.group (1) + '{' + _doc (r) + '}'
-
-def node_gettext (m):
- return '@node ' + _doc (m.group (1)) + '\n@' + \
- m.group (2) + ' ' + _doc (m.group (3)) + \
- '\n@translationof ' + m.group (1) + '\n'
-
-def menu_entry_gettext (m):
- return '* ' + _doc (m.group (1)) + '::'
-
-def process_file (filename):
- print "Processing %s" % filename
- f = open (filename, 'r')
- page = f.read ()
- f.close()
- page = node_section_re.sub (node_gettext, page)
- page = ref_re.sub (ref_gettext, page)
- page = menu_entry_re.sub (menu_entry_gettext, page)
- page = page.replace ("""-- SKELETON FILE --
-When you actually translate this file, please remove these lines as
-well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", """@c -- SKELETON FILE --""")
- page = page.replace ('UNTRANSLATED NODE: IGNORE ME', "@c UNTRANSLATED NODE: IGNORE ME")
- includes = [whitespaces.sub ('', f) for f in include_re.findall (page)]
- f = open (filename, 'w')
- f.write (page)
- f.close ()
- dir = os.path.dirname (filename)
- for file in includes:
- p = os.path.join (dir, file)
- if os.path.exists (p):
- process_file (p)
-
-for filename in sys.argv[4:]:
- process_file (filename)
+++ /dev/null
-#!@PYTHON@
-# -*- coding: utf-8 -*-
-# texi-gettext.py
-
-# USAGE: texi-gettext.py [-o OUTDIR] LANG FILES
-#
-# -o OUTDIR specifies that output files should rather be written in OUTDIR
-#
-
-print "texi_gettext.py"
-
-import sys
-import re
-import os
-import getopt
-
-import langdefs
-
-optlist, args = getopt.getopt (sys.argv[1:],'o:')
-lang = args[0]
-files = args[1:]
-
-outdir = '.'
-for x in optlist:
- if x[0] == '-o':
- outdir = x[1]
-
-double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
-_doc = langdefs.translation[lang]
-
-include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M)
-whitespaces = re.compile (r'\s+')
-ref_re = re.compile (r'(?ms)@(rglos|ruser|rprogram|ref)(\{)(.*?)(\})')
-node_section_re = re.compile (r'@(node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading)( )(.*?)(\n)')
-menu_entry_re = re.compile (r'\* (.*?)::')
-
-def title_gettext (m):
- if m.group (2) == '{':
- r = whitespaces.sub (' ', m.group (3))
- else:
- r = m.group (3)
- return '@' + m.group (1) + m.group (2) + _doc (r) + m.group (4)
-
-def menu_entry_gettext (m):
- return '* ' + _doc (m.group (1)) + '::'
-
-def include_replace (m, filename):
- if os.path.exists (os.path.join (os.path.dirname (filename), m.group(1)) + '.texi'):
- return '@include ' + m.group(1) + '.pdftexi'
- return m.group(0)
-
-def process_file (filename):
- print "Processing %s" % filename
- f = open (filename, 'r')
- page = f.read ()
- f.close()
- page = node_section_re.sub (title_gettext, page)
- page = ref_re.sub (title_gettext, page)
- page = menu_entry_re.sub (menu_entry_gettext, page)
- page = page.replace ("""-- SKELETON FILE --
-When you actually translate this file, please remove these lines as
-well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", '')
- page = page.replace ('UNTRANSLATED NODE: IGNORE ME', _doc ("This section has not been translated yet; please refer to the manual in English."))
- includes = include_re.findall (page)
- page = include_re.sub (lambda m: include_replace (m, filename), page)
- p = os.path.join (outdir, filename) [:-4] + 'pdftexi'
- f = open (p, 'w')
- f.write (page)
- f.close ()
- dir = os.path.dirname (filename)
- for file in includes:
- p = os.path.join (dir, file) + '.texi'
- if os.path.exists (p):
- process_file (p)
-
-for filename in files:
- process_file (filename)
+++ /dev/null
-#!@PYTHON@
-# texi-langutils.py
-
-# WARNING: this script can't find files included in a different directory
-
-import sys
-import re
-import getopt
-import os
-
-import langdefs
-
-def read_pipe (command):
- print command
- pipe = os.popen (command)
- output = pipe.read ()
- if pipe.close ():
- print "pipe failed: %(command)s" % locals ()
- return output
-
-
-optlist, texi_files = getopt.getopt(sys.argv[1:],'no:d:b:i:l:',['skeleton', 'gettext'])
-process_includes = not ('-n', '') in optlist # -n don't process @include's in texinfo files
-
-make_gettext = ('--gettext', '') in optlist # --gettext generate a node list from a Texinfo source
-make_skeleton = ('--skeleton', '') in optlist # --skeleton extract the node tree from a Texinfo source
-
-output_file = 'doc.pot'
-
-# @untranslated should be defined as a macro in Texinfo source
-node_blurb = '''@untranslated
-'''
-doclang = ''
-head_committish = read_pipe ('git-rev-parse HEAD')
-intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*-
-@c This file is part of %(topfile)s
-@ignore
- Translation of GIT committish: %(head_committish)s
- When revising a translation, copy the HEAD committish of the
- version that you are working on. See TRANSLATION for details.
-@end ignore
-'''
-
-end_blurb = """
-@c -- SKELETON FILE --
-"""
-
-for x in optlist:
- if x[0] == '-o': # -o NAME set PO output file name to NAME
- output_file = x[1]
- elif x[0] == '-d': # -d DIR set working directory to DIR
- os.chdir (x[1])
- elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB
- node_blurb = x[1]
- elif x[0] == '-i': # -i BLURB set blurb written at beginning of each file to BLURB
- intro_blurb = x[1]
- elif x[0] == '-l': # -l ISOLANG set documentlanguage to ISOLANG
- doclang = '; documentlanguage: ' + x[1]
-
-texinfo_with_menus_re = re.compile (r"^(\*) +([^:\n]+)::.*?$|^@(include|menu|end menu|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.*?)$|@(rglos){(.+?)}", re.M)
-
-texinfo_re = re.compile (r"^@(include|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.+?)$|@(rglos){(.+?)}", re.M)
-
-ly_string_re = re.compile (r'^([a-zA-Z]+)[\t ]*=|%+[\t ]*(.*)$|\\(?:new|context)\s+(?:[a-zA-Z]*?(?:Staff(?:Group)?|Voice|FiguredBass|FretBoards|Names|Devnull))\s+=\s+"?([a-zA-Z]+)"?\s+')
-lsr_verbatim_ly_re = re.compile (r'% begin verbatim$')
-texinfo_verbatim_ly_re = re.compile (r'^@lilypond\[.*?verbatim')
-
-def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile, output_file=None, scan_ly=False):
- try:
- f = open (texifilename, 'r')
- texifile = f.read ()
- f.close ()
- printedfilename = texifilename.replace ('../','')
- includes = []
-
- # process ly var names and comments
- if output_file and (scan_ly or texifilename.endswith ('.ly')):
- lines = texifile.splitlines ()
- i = 0
- in_verb_ly_block = False
- if texifilename.endswith ('.ly'):
- verbatim_ly_re = lsr_verbatim_ly_re
- else:
- verbatim_ly_re = texinfo_verbatim_ly_re
- for i in range (len (lines)):
- if verbatim_ly_re.search (lines[i]):
- in_verb_ly_block = True
- elif lines[i].startswith ('@end lilypond'):
- in_verb_ly_block = False
- elif in_verb_ly_block:
- for (var, comment, context_id) in ly_string_re.findall (lines[i]):
- if var:
- output_file.write ('# ' + printedfilename + ':' + \
- str (i + 1) + ' (variable)\n_(r"' + var + '")\n')
- elif comment:
- output_file.write ('# ' + printedfilename + ':' + \
- str (i + 1) + ' (comment)\n_(r"' + \
- comment.replace ('"', '\\"') + '")\n')
- elif context_id:
- output_file.write ('# ' + printedfilename + ':' + \
- str (i + 1) + ' (context id)\n_(r"' + \
- context_id + '")\n')
-
- # process Texinfo node names and section titles
- if write_skeleton:
- g = open (os.path.basename (texifilename), 'w')
- subst = globals ()
- subst.update (locals ())
- g.write (i_blurb % subst)
- tutu = texinfo_with_menus_re.findall (texifile)
- node_trigger = False
- for item in tutu:
- if item[0] == '*':
- g.write ('* ' + item[1] + '::\n')
- elif output_file and item[4] == 'rglos':
- output_file.write ('_(r"' + item[5] + '") # @rglos in ' + printedfilename + '\n')
- elif item[2] == 'menu':
- g.write ('@menu\n')
- elif item[2] == 'end menu':
- g.write ('@end menu\n\n')
- else:
- g.write ('@' + item[2] + ' ' + item[3] + '\n')
- if node_trigger:
- g.write (n_blurb)
- node_trigger = False
- elif item[2] == 'include':
- includes.append (item[3])
- else:
- if output_file:
- output_file.write ('# @' + item[2] + ' in ' + \
- printedfilename + '\n_(r"' + item[3].strip () + '")\n')
- if item[2] == 'node':
- node_trigger = True
- g.write (end_blurb)
- g.close ()
-
- elif output_file:
- toto = texinfo_re.findall (texifile)
- for item in toto:
- if item[0] == 'include':
- includes.append(item[1])
- elif item[2] == 'rglos':
- output_file.write ('# @rglos in ' + printedfilename + '\n_(r"' + item[3] + '")\n')
- else:
- output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip () + '")\n')
-
- if process_includes:
- dir = os.path.dirname (texifilename)
- for item in includes:
- process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb, write_skeleton, topfile, output_file, scan_ly)
- except IOError, (errno, strerror):
- sys.stderr.write ("I/O error(%s): %s: %s\n" % (errno, texifilename, strerror))
-
-
-if intro_blurb != '':
- intro_blurb += '\n\n'
-if node_blurb != '':
- node_blurb = '\n' + node_blurb + '\n\n'
-if make_gettext:
- node_list_filename = 'node_list'
- node_list = open (node_list_filename, 'w')
- node_list.write ('# -*- coding: utf-8 -*-\n')
- for texi_file in texi_files:
- # Urgly: scan ly comments and variable names only in English doco
- is_english_doc = 'Documentation/user' in texi_file
- process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
- os.path.basename (texi_file), node_list,
- scan_ly=is_english_doc)
- for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'):
- node_list.write ('_(r"' + word + '")\n')
- node_list.close ()
- os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename)
-else:
- for texi_file in texi_files:
- process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
- os.path.basename (texi_file))
+++ /dev/null
-#!@PYTHON@
-# texi-skeleton-update.py
-
-import sys
-import glob
-import os
-import shutil
-
-sys.stderr.write ('texi-skeleton-update.py\n')
-
-orig_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[1] + '/*.ite??')])
-new_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[2] + '/*.ite??')])
-
-for f in new_skeletons:
- if f in orig_skeletons:
- g = open (os.path.join (sys.argv[1], f), 'r').read ()
- if '-- SKELETON FILE --' in g:
- sys.stderr.write ("Updating %s...\n" % f)
- shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
- elif f != 'fdl.itexi':
- sys.stderr.write ("Copying new file %s...\n" % f)
- shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
-
-for f in orig_skeletons.difference (new_skeletons):
- sys.stderr.write ("Warning: outdated skeleton file %s\n" % f)
+++ /dev/null
-#!@PYTHON@
-
-import getopt
-import os
-import re
-import sys
-import time
-
-def usage ():
- sys.stderr.write ('''
-texi2omf [options] FILE.texi > FILE.omf
-
-Options:
-
---format=FORM set format FORM (HTML, PS, PDF, [XML]).
---location=FILE file name as installed on disk.
---version=VERSION
-
-Use the following commands (enclose in @ignore)
-
-@omfsubject . .
-@omfdescription . .
-@omftype . .
-
-etc.
-
-
-''')
-
-(options, files) = getopt.getopt (sys.argv[1:], '',
- ['format=', 'location=', 'version='])
-
-license = 'FDL'
-location = ''
-version = ''
-email = os.getenv ('MAILADDRESS')
-name = os.getenv ('USERNAME')
-format = 'xml'
-
-for (o, a) in options:
- if o == '--format':
- format = a
- elif o == '--location':
- location = 'file:%s' % a
- elif o == '--version':
- version = a
- else:
- assert 0
-
-
-if not files:
- usage ()
- sys.exit (2)
-
-
-formats = {
- 'html' : 'text/html',
- 'pdf' : 'application/pdf',
- 'ps.gz' : 'application/postscript',
- 'ps' : 'application/postscript',
- 'xml' : 'text/xml',
- }
-
-if not formats.has_key (format):
- sys.stderr.write ("Format `%s' unknown\n" % format)
- sys.exit (1)
-
-
-infile = files[0]
-
-today = time.localtime ()
-
-texi = open (infile).read ()
-
-if not location:
- location = 'file:/%s' % re.sub (r'\..*', '.' + format, infile)
-
-omf_vars = {
- 'date': '%d-%d-%d' % today[:3],
- 'mimeformat': formats[format],
- 'maintainer': "%s (%s)" % (name, email),
- 'version' : version,
- 'location' : location,
- 'language' : 'C',
- }
-
-omf_caterories = ['subject', 'creator', 'maintainer', 'contributor',
- 'title', 'subtitle', 'version', 'category', 'type',
- 'description', 'license', 'language',]
-
-for a in omf_caterories:
- m = re.search ('@omf%s (.*)\n'% a, texi)
- if m:
- omf_vars[a] = m.group (1)
- elif not omf_vars.has_key (a):
- omf_vars[a] = ''
-
-if not omf_vars['title']:
- title = ''
- m = re.search ('@title (.*)\n', texi)
- if m:
- title = m.group (1)
-
- subtitle = ''
- m = re.search ('@subtitle (.*)\n', texi)
- if m:
- subtitle = m.group (1)
-
- if subtitle:
- title = '%s -- %s' % (title, subtitle)
-
- omf_vars['title'] = title
-
-if not omf_vars['creator']:
- m = re.search ('@author (.*)\n', texi)
- if m:
- omf_vars['creator'] = m.group (1)
-
-
-
-print r'''<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE omf PUBLIC "-//OMF//DTD Scrollkeeper OMF Variant V1.0//EN" "http://scrollkeeper.sourceforge.net/dtds/scrollkeeper-omf-1.0/scrollkeeper-omf.dtd">
-<omf>
- <resource>
- <creator>
- %(creator)s
- </creator>
- <maintainer>
- %(maintainer)s
- </maintainer>
- <title>
- %(title)s
- </title>
- <date>
- %(date)s
- </date>
- <version identifier="%(version)s" date="%(date)s" />
- <subject category="%(category)s"/>
- <description>
- %(description)s
- </description>
- <type>
- %(type)s
- </type>
- <format mime="%(mimeformat)s" />
- <identifier url="%(location)s"/>
- <language code="%(language)s"/>
- <rights type="%(license)s" />
- </resource>
-</omf>
-
-''' % omf_vars
-
-
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-USAGE: translations-status.py BUILDSCRIPT-DIR LOCALEDIR
-
- This script must be run from Documentation/
-
- Reads template files translations.template.html.in
-and for each LANG in LANGUAGES LANG/translations.template.html.in
- Writes translations.html.in and for each LANG in LANGUAGES
-translations.LANG.html.in
- Writes out/translations-status.txt
- Updates word counts in TRANSLATION
-"""
-
-import sys
-import re
-import string
-import os
-
-import langdefs
-import buildlib
-
-def progress (str):
- sys.stderr.write (str + '\n')
-
-progress ("translations-status.py")
-
-_doc = lambda s: s
-
-# load gettext messages catalogs
-translation = langdefs.translation
-
-
-language_re = re.compile (r'^@documentlanguage (.+)', re.M)
-comments_re = re.compile (r'^@ignore\n(.|\n)*?\n@end ignore$|@c .*?$', re.M)
-space_re = re.compile (r'\s+', re.M)
-lilypond_re = re.compile (r'@lilypond({.*?}|(.|\n)*?\n@end lilypond$)', re.M)
-node_re = re.compile ('^@node .*?$', re.M)
-title_re = re.compile ('^@(top|chapter|(?:sub){0,2}section|' + \
-'(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?) (.*?)$', re.M)
-include_re = re.compile ('^@include (.*?)$', re.M)
-
-translators_re = re.compile (r'^@c\s+Translators\s*:\s*(.*?)$', re.M | re.I)
-checkers_re = re.compile (r'^@c\s+Translation\s*checkers\s*:\s*(.*?)$',
- re.M | re.I)
-status_re = re.compile (r'^@c\s+Translation\s*status\s*:\s*(.*?)$', re.M | re.I)
-post_gdp_re = re.compile ('post.GDP', re.I)
-untranslated_node_str = '@untranslated'
-skeleton_str = '-- SKELETON FILE --'
-
-section_titles_string = _doc ('Section titles')
-last_updated_string = _doc (' <p><i>Last updated %s</i></p>\n')
-detailed_status_heads = [_doc ('Translators'), _doc ('Translation checkers'),
- _doc ('Translated'), _doc ('Up to date'),
- _doc ('Other info')]
-format_table = {
- 'not translated': {'color':'d0f0f8', 'short':_doc ('no'), 'abbr':'NT',
- 'long':_doc ('not translated')},
- 'partially translated': {'color':'dfef77',
- 'short':_doc ('partially (%(p)d %%)'),
- 'abbr':'%(p)d%%',
- 'long':_doc ('partially translated (%(p)d %%)')},
- 'fully translated': {'color':'1fff1f', 'short':_doc ('yes'), 'abbr':'FT',
- 'long': _doc ('translated')},
- 'up to date': {'short':_doc ('yes'), 'long':_doc ('up to date'),
- 'abbr':'100%%', 'vague':_doc ('up to date')},
- 'outdated': {'short':_doc ('partially'), 'abbr':'%(p)d%%',
- 'vague':_doc ('partially up to date')},
- 'N/A': {'short':_doc ('N/A'), 'abbr':'N/A', 'color':'d587ff', 'vague':''},
- 'pre-GDP':_doc ('pre-GDP'),
- 'post-GDP':_doc ('post-GDP')
-}
-
-texi_level = {
-# (Unumbered/Numbered/Lettered, level)
- 'top': ('u', 0),
- 'unnumbered': ('u', 1),
- 'unnumberedsec': ('u', 2),
- 'unnumberedsubsec': ('u', 3),
- 'chapter': ('n', 1),
- 'section': ('n', 2),
- 'subsection': ('n', 3),
- 'appendix': ('l', 1)
-}
-
-appendix_number_trans = string.maketrans ('@ABCDEFGHIJKLMNOPQRSTUVWXY',
- 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
-
-class SectionNumber (object):
- def __init__ (self):
- self.__data = [[0,'u']]
-
- def __increase_last_index (self):
- type = self.__data[-1][1]
- if type == 'l':
- self.__data[-1][0] = \
- self.__data[-1][0].translate (appendix_number_trans)
- elif type == 'n':
- self.__data[-1][0] += 1
-
- def format (self):
- if self.__data[-1][1] == 'u':
- return ''
- return '.'.join ([str (i[0]) for i in self.__data if i[1] != 'u']) + ' '
-
- def increase (self, (type, level)):
- if level == 0:
- self.__data = [[0,'u']]
- while level + 1 < len (self.__data):
- del self.__data[-1]
- if level + 1 > len (self.__data):
- self.__data.append ([0, type])
- if type == 'l':
- self.__data[-1][0] = '@'
- if type == self.__data[-1][1]:
- self.__increase_last_index ()
- else:
- self.__data[-1] = ([0, type])
- if type == 'l':
- self.__data[-1][0] = 'A'
- elif type == 'n':
- self.__data[-1][0] = 1
- return self.format ()
-
-
-def percentage_color (percent):
- p = percent / 100.0
- if p < 0.33:
- c = [hex (int (3 * p * b + (1 - 3 * p) * a))[2:]
- for (a, b) in [(0xff, 0xff), (0x5c, 0xa6), (0x5c, 0x4c)]]
- elif p < 0.67:
- c = [hex (int ((3 * p - 1) * b + (2 - 3 * p) * a))[2:]
- for (a, b) in [(0xff, 0xff), (0xa6, 0xff), (0x4c, 0x3d)]]
- else:
- c = [hex (int ((3 * p - 2) * b + 3 * (1 - p) * a))[2:]
- for (a, b) in [(0xff, 0x1f), (0xff, 0xff), (0x3d, 0x1f)]]
- return ''.join (c)
-
-
-def update_word_count (text, filename, word_count):
- return re.sub (r'(?m)^(\d+) *' + filename,
- str (word_count).ljust (6) + filename,
- text)
-
-po_msgid_re = re.compile (r'^msgid "(.*?)"(?:\n"(.*?)")*', re.M)
-
-def po_word_count (po_content):
- s = ' '.join ([''.join (t) for t in po_msgid_re.findall (po_content)])
- return len (space_re.split (s))
-
-sgml_tag_re = re.compile (r'<.*?>', re.S)
-
-def sgml_word_count (sgml_doc):
- s = sgml_tag_re.sub ('', sgml_doc)
- return len (space_re.split (s))
-
-def tely_word_count (tely_doc):
- '''
- Calculate word count of a Texinfo document node by node.
-
- Take string tely_doc as an argument.
- Return a list of integers.
-
- Texinfo comments and @lilypond blocks are not included in word counts.
- '''
- tely_doc = comments_re.sub ('', tely_doc)
- tely_doc = lilypond_re.sub ('', tely_doc)
- nodes = node_re.split (tely_doc)
- return [len (space_re.split (n)) for n in nodes]
-
-
-class TelyDocument (object):
- def __init__ (self, filename):
- self.filename = filename
- self.contents = open (filename).read ()
-
- ## record title and sectionning level of first Texinfo section
- m = title_re.search (self.contents)
- if m:
- self.title = m.group (2)
- self.level = texi_level [m.group (1)]
- else:
- self.title = 'Untitled'
- self.level = ('u', 1)
-
- m = language_re.search (self.contents)
- if m:
- self.language = m.group (1)
-
- included_files = [os.path.join (os.path.dirname (filename), t)
- for t in include_re.findall (self.contents)]
- self.included_files = [p for p in included_files if os.path.exists (p)]
-
- def print_title (self, section_number):
- return section_number.increase (self.level) + self.title
-
-
-class TranslatedTelyDocument (TelyDocument):
- def __init__ (self, filename, masterdocument, parent_translation=None):
- TelyDocument.__init__ (self, filename)
-
- self.masterdocument = masterdocument
- if not hasattr (self, 'language') \
- and hasattr (parent_translation, 'language'):
- self.language = parent_translation.language
- if hasattr (self, 'language'):
- self.translation = translation[self.language]
- else:
- self.translation = lambda x: x
- self.title = self.translation (self.title)
-
- ## record authoring information
- m = translators_re.search (self.contents)
- if m:
- self.translators = [n.strip () for n in m.group (1).split (',')]
- else:
- self.translators = parent_translation.translators
- m = checkers_re.search (self.contents)
- if m:
- self.checkers = [n.strip () for n in m.group (1).split (',')]
- elif isinstance (parent_translation, TranslatedTelyDocument):
- self.checkers = parent_translation.checkers
- else:
- self.checkers = []
-
- ## check whether translation is pre- or post-GDP
- m = status_re.search (self.contents)
- if m:
- self.post_gdp = bool (post_gdp_re.search (m.group (1)))
- else:
- self.post_gdp = False
-
- ## record which parts (nodes) of the file are actually translated
- self.partially_translated = not skeleton_str in self.contents
- nodes = node_re.split (self.contents)
- self.translated_nodes = [not untranslated_node_str in n for n in nodes]
-
- ## calculate translation percentage
- master_total_word_count = sum (masterdocument.word_count)
- translation_word_count = \
- sum ([masterdocument.word_count[k] * self.translated_nodes[k]
- for k in range (min (len (masterdocument.word_count),
- len (self.translated_nodes)))])
- self.translation_percentage = \
- 100 * translation_word_count / master_total_word_count
-
- ## calculate how much the file is outdated
- (diff_string, error) = \
- buildlib.check_translated_doc (masterdocument.filename, self.filename, self.contents)
- if error:
- sys.stderr.write ('warning: %s: %s' % (self.filename, error))
- self.uptodate_percentage = None
- else:
- diff = diff_string.splitlines ()
- insertions = sum ([len (l) - 1 for l in diff
- if l.startswith ('+')
- and not l.startswith ('+++')])
- deletions = sum ([len (l) - 1 for l in diff
- if l.startswith ('-')
- and not l.startswith ('---')])
- outdateness_percentage = 50.0 * (deletions + insertions) / \
- (masterdocument.size + 0.5 * (deletions - insertions))
- self.uptodate_percentage = 100 - int (outdateness_percentage)
- if self.uptodate_percentage > 100:
- alternative = 50
- progress ("%s: strange uptodateness percentage %d %%, \
-setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
- self.uptodate_percentage = alternative
- elif self.uptodate_percentage < 1:
- alternative = 1
- progress ("%s: strange uptodateness percentage %d %%, \
-setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
- self.uptodate_percentage = alternative
-
- def completeness (self, formats=['long'], translated=False):
- if translated:
- translation = self.translation
- else:
- translation = lambda x: x
-
- if isinstance (formats, str):
- formats = [formats]
- p = self.translation_percentage
- if p == 0:
- status = 'not translated'
- elif p == 100:
- status = 'fully translated'
- else:
- status = 'partially translated'
- return dict ([(f, translation (format_table[status][f]) % locals())
- for f in formats])
-
- def uptodateness (self, formats=['long'], translated=False):
- if translated:
- translation = self.translation
- else:
- translation = lambda x: x
-
- if isinstance (formats, str):
- formats = [formats]
- p = self.uptodate_percentage
- if p == None:
- status = 'N/A'
- elif p == 100:
- status = 'up to date'
- else:
- status = 'outdated'
- l = {}
- for f in formats:
- if f == 'color' and p != None:
- l['color'] = percentage_color (p)
- else:
- l[f] = translation (format_table[status][f]) % locals ()
- return l
-
- def gdp_status (self):
- if self.post_gdp:
- return self.translation (format_table['post-GDP'])
- else:
- return self.translation (format_table['pre-GDP'])
-
- def short_html_status (self):
- s = ' <td>'
- if self.partially_translated:
- s += '<br>\n '.join (self.translators) + '<br>\n'
- if self.checkers:
- s += ' <small>' + \
- '<br>\n '.join (self.checkers) + '</small><br>\n'
-
- c = self.completeness (['color', 'long'])
- s += ' <span style="background-color: #%(color)s">\
-%(long)s</span><br>\n' % c
-
- if self.partially_translated:
- u = self.uptodateness (['vague', 'color'])
- s += ' <span style="background-color: #%(color)s">\
-%(vague)s</span><br>\n' % u
-
- s += ' </td>\n'
- return s
-
- def text_status (self):
- s = self.completeness ('abbr')['abbr'] + ' '
-
- if self.partially_translated:
- s += self.uptodateness ('abbr')['abbr'] + ' '
- return s
-
- def html_status (self, numbering=SectionNumber ()):
- if self.title == 'Untitled':
- return ''
-
- if self.level[1] == 0: # if self is a master document
- s = '''<table align="center" border="2">
- <tr align="center">
- <th>%s</th>''' % self.print_title (numbering)
- s += ''.join ([' <th>%s</th>\n' % self.translation (h)
- for h in detailed_status_heads])
- s += ' </tr>\n'
- s += ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
- % (self.translation (section_titles_string),
- sum (self.masterdocument.word_count))
-
- else:
- s = ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
- % (self.print_title (numbering),
- sum (self.masterdocument.word_count))
-
- if self.partially_translated:
- s += ' <td>' + '<br>\n '.join (self.translators) + '</td>\n'
- s += ' <td>' + '<br>\n '.join (self.checkers) + '</td>\n'
- else:
- s += ' <td></td>\n' * 2
-
- c = self.completeness (['color', 'short'], translated=True)
- s += ' <td><span style="background-color: #%(color)s">\
-%(short)s</span></td>\n' % {'color': c['color'],
- 'short': c['short']}
-
- if self.partially_translated:
- u = self.uptodateness (['short', 'color'], translated=True)
- s += ' <td><span style="background-color: #%(color)s">\
-%(short)s</span></td>\n' % {'color': u['color'],
- 'short': u['short']}
- else:
- s += ' <td></td>\n'
-
- s += ' <td>' + self.gdp_status () + '</td>\n </tr>\n'
- s += ''.join ([i.translations[self.language].html_status (numbering)
- for i in self.masterdocument.includes
- if self.language in i.translations])
-
- if self.level[1] == 0: # if self is a master document
- s += '</table>\n<p></p>\n'
- return s
-
-class MasterTelyDocument (TelyDocument):
- def __init__ (self,
- filename,
- parent_translations=dict ([(lang, None)
- for lang in langdefs.LANGDICT])):
- TelyDocument.__init__ (self, filename)
- self.size = len (self.contents)
- self.word_count = tely_word_count (self.contents)
- translations = dict ([(lang, os.path.join (lang, filename))
- for lang in langdefs.LANGDICT])
- self.translations = \
- dict ([(lang,
- TranslatedTelyDocument (translations[lang],
- self, parent_translations.get (lang)))
- for lang in langdefs.LANGDICT
- if os.path.exists (translations[lang])])
- if self.translations:
- self.includes = [MasterTelyDocument (f, self.translations)
- for f in self.included_files]
- else:
- self.includes = []
-
- def update_word_counts (self, s):
- s = update_word_count (s, self.filename, sum (self.word_count))
- for i in self.includes:
- s = i.update_word_counts (s)
- return s
-
- def html_status (self, numbering=SectionNumber ()):
- if self.title == 'Untitled' or not self.translations:
- return ''
- if self.level[1] == 0: # if self is a master document
- s = '''<table align="center" border="2">
- <tr align="center">
- <th>%s</th>''' % self.print_title (numbering)
- s += ''.join ([' <th>%s</th>\n' % l for l in self.translations])
- s += ' </tr>\n'
- s += ' <tr align="left">\n <td>Section titles<br>(%d)</td>\n' \
- % sum (self.word_count)
-
- else: # if self is an included file
- s = ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
- % (self.print_title (numbering), sum (self.word_count))
-
- s += ''.join ([t.short_html_status ()
- for t in self.translations.values ()])
- s += ' </tr>\n'
- s += ''.join ([i.html_status (numbering) for i in self.includes])
-
- if self.level[1] == 0: # if self is a master document
- s += '</table>\n<p></p>\n'
- return s
-
- def text_status (self, numbering=SectionNumber (), colspec=[48,12]):
- if self.title == 'Untitled' or not self.translations:
- return ''
-
- s = ''
- if self.level[1] == 0: # if self is a master document
- s += (self.print_title (numbering) + ' ').ljust (colspec[0])
- s += ''.join (['%s'.ljust (colspec[1]) % l
- for l in self.translations])
- s += '\n'
- s += ('Section titles (%d)' % \
- sum (self.word_count)).ljust (colspec[0])
-
- else:
- s = '%s (%d) ' \
- % (self.print_title (numbering), sum (self.word_count))
- s = s.ljust (colspec[0])
-
- s += ''.join ([t.text_status ().ljust(colspec[1])
- for t in self.translations.values ()])
- s += '\n\n'
- s += ''.join ([i.text_status (numbering) for i in self.includes])
-
- if self.level[1] == 0:
- s += '\n'
- return s
-
-
-update_category_word_counts_re = re.compile (r'(?ms)^-(\d+)-(.*?\n)\d+ *total')
-
-counts_re = re.compile (r'(?m)^(\d+) ')
-
-def update_category_word_counts_sub (m):
- return '-' + m.group (1) + '-' + m.group (2) + \
- str (sum ([int (c)
- for c in counts_re.findall (m.group (2))])).ljust (6) + \
- 'total'
-
-
-progress ("Reading documents...")
-
-tely_files = \
- buildlib.read_pipe ("find -maxdepth 2 -name '*.tely'")[0].splitlines ()
-tely_files.sort ()
-master_docs = [MasterTelyDocument (os.path.normpath (filename))
- for filename in tely_files]
-master_docs = [doc for doc in master_docs if doc.translations]
-
-main_status_page = open ('translations.template.html.in').read ()
-
-enabled_languages = [l for l in langdefs.LANGDICT
- if langdefs.LANGDICT[l].enabled
- and l != 'en']
-lang_status_pages = \
- dict ([(l, open (os.path.join (l, 'translations.template.html.in')). read ())
- for l in enabled_languages])
-
-progress ("Generating status pages...")
-
-date_time = buildlib.read_pipe ('LANG= date -u')[0]
-
-main_status_html = last_updated_string % date_time
-main_status_html += '\n'.join ([doc.html_status () for doc in master_docs])
-
-html_re = re.compile ('<html>', re.I)
-end_body_re = re.compile ('</body>', re.I)
-
-html_header = '''<html>
-<!-- This page is automatically generated by translation-status.py from
-translations.template.html.in; DO NOT EDIT !-->'''
-
-main_status_page = html_re.sub (html_header, main_status_page)
-
-main_status_page = end_body_re.sub (main_status_html + '\n</body>',
- main_status_page)
-
-open ('translations.html.in', 'w').write (main_status_page)
-
-for l in enabled_languages:
- date_time = buildlib.read_pipe ('LANG=%s date -u' % l)[0]
- lang_status_pages[l] = translation[l] (last_updated_string) % date_time + lang_status_pages[l]
- lang_status_page = html_re.sub (html_header, lang_status_pages[l])
- html_status = '\n'.join ([doc.translations[l].html_status ()
- for doc in master_docs
- if l in doc.translations])
- lang_status_page = end_body_re.sub (html_status + '\n</body>',
- lang_status_page)
- open (os.path.join (l, 'translations.html.in'), 'w').write (lang_status_page)
-
-main_status_txt = '''Documentation translations status
-Generated %s
-NT = not translated
-FT = fully translated
-
-''' % date_time
-
-main_status_txt += '\n'.join ([doc.text_status () for doc in master_docs])
-
-status_txt_file = 'out/translations-status.txt'
-progress ("Writing %s..." % status_txt_file)
-open (status_txt_file, 'w').write (main_status_txt)
-
-translation_instructions_file = 'TRANSLATION'
-progress ("Updating %s..." % translation_instructions_file)
-translation_instructions = open (translation_instructions_file).read ()
-
-for doc in master_docs:
- translation_instructions = doc.update_word_counts (translation_instructions)
-
-for html_file in re.findall (r'(?m)^\d+ *(\S+?\.html\S*?)(?: |$)',
- translation_instructions):
- word_count = sgml_word_count (open (html_file).read ())
- translation_instructions = update_word_count (translation_instructions,
- html_file,
- word_count)
-
-for po_file in re.findall (r'(?m)^\d+ *(\S+?\.po\S*?)(?: |$)',
- translation_instructions):
- word_count = po_word_count (open (po_file).read ())
- translation_instructions = update_word_count (translation_instructions,
- po_file,
- word_count)
-
-translation_instructions = \
- update_category_word_counts_re.sub (update_category_word_counts_sub,
- translation_instructions)
-
-open (translation_instructions_file, 'w').write (translation_instructions)
+++ /dev/null
-#!@PYTHON@
-# update-snippets.py
-
-# USAGE: update-snippets.py REFERENCE-DIR TARGET-DIR FILES
-#
-# update ly snippets in TARGET-DIR/FILES with snippets from REFERENCE-DIR/FILES
-#
-# More precisely, each existing FILE in TARGET-DIR is matched to the FILE in
-# REFERENCE-DIR (it the latter does not exist, a warning is given).
-#
-# Shell wildcards expansion is performed on FILES.
-# This script currently supports Texinfo format.
-# Ly snippets preceded with a line containing '@c KEEP LY' in TARGET-DIR/FILES
-# will not be updated.
-# An error occurs if REFERENCE-DIR/FILE and TARGET-DIR/FILE do not have the
-# same snippets count.
-
-import sys
-import os
-import glob
-import re
-
-print "update-snippets.py"
-
-comment_re = re.compile (r'(?<!@)(@c(?:omment)? .*?\n|^@ignore\n.*?\n@end ignore\n)', re.M | re.S)
-snippet_re = re.compile (r'^(@lilypond(?:file)?(?:\[.*?\])?\s*\{.+?\}|@lilypond(?:\[.*?\])?(?:.|\n)+?@end lilypond)', re.M)
-
-
-def snippet_split (l):
- r = []
- for s in [s for s in l if s]:
- if s.startswith ('@c ') or s.startswith ('@ignore\n') or s.startswith ('@comment '):
- r.append(s)
- else:
- r += [t for t in snippet_re.split (s) if t]
- return r
-
-def count_snippet (l):
- k = 0
- for s in l:
- if s.startswith ('@lilypond'):
- k += 1
- return k
-
-def find_next_snippet (l, k):
- while not l[k].startswith ('@lilypond'):
- k += 1
- return k
-
-exit_code = 0
-
-def update_exit_code (code):
- global exit_code
- exit_code = max (code, exit_code)
-
-ref_dir, target_dir = sys.argv [1:3]
-file_patterns = sys.argv[3:]
-
-total_snippet_count = 0
-changed_snippets_count = 0
-
-for pattern in file_patterns:
- files = glob.glob (os.path.join (target_dir, pattern))
- for file in files:
- ref_file = os.path.join (ref_dir, os.path.basename (file))
- if not os.path.isfile (ref_file):
- sys.stderr.write ("Warning: %s: no such file.\nReference file for %s not found.\n" % (ref_file, file))
- continue
- f = open (file, 'r')
- target_source = comment_re.split (f.read ())
- f.close ()
- if reduce (lambda x, y: x or y, ['-- SKELETON FILE --' in s for s in target_source]):
- sys.stderr.write ("Skipping skeleton file %s\n" % file)
- continue
- g = open (ref_file, 'r')
- ref_source = comment_re.split (g.read ())
- target_source = snippet_split (target_source)
- ref_source = snippet_split (ref_source)
- if '' in target_source or '' in ref_source:
- raise "AAAAARGH: unuseful empty string"
- snippet_count = count_snippet (target_source)
- if not snippet_count == count_snippet (ref_source):
- update_exit_code (1)
- sys.stderr.write ("Error: %s and %s have different snippet counts.\n\
-Update translation by at least adding a @lilypond block where necessary, then rerun this script.\n" % (ref_file, file))
- continue
- total_snippet_count += snippet_count
- c = 0
- k = -1
- for j in range (len (target_source)):
- if target_source[j].startswith ('@lilypond'):
- k = find_next_snippet (ref_source, k+1)
- if j > 0 and (not target_source[j-1].startswith ('@c KEEP LY')) and target_source[j] != ref_source[k]:
- target_source[j] = ref_source[k]
- c += 1
- changed_snippets_count += 1
- f = open (file, 'w')
- f.write (''.join (target_source))
- sys.stderr.write ('%s: %d/%d snippets updated\n' % (file, c, snippet_count))
-
-sys.stderr.write ('\nTotal: %d snippets, %d updated snippets.\n' % (total_snippet_count, changed_snippets_count))
-sys.exit (exit_code)
+++ /dev/null
-#!@PYTHON@
-
-## This is www_post.py. This script is the main stage
-## of toplevel GNUmakefile local-WWW-post target.
-
-# USAGE: www_post PACKAGE_NAME TOPLEVEL_VERSION OUTDIR TARGETS
-# please call me from top of the source directory
-
-import sys
-import os
-import re
-
-import langdefs
-
-import mirrortree
-import postprocess_html
-
-package_name, package_version, outdir, targets = sys.argv[1:]
-targets = targets.split (' ')
-outdir = os.path.normpath (outdir)
-doc_dirs = ['input', 'Documentation', outdir]
-target_pattern = os.path.join (outdir, '%s-root')
-
-# these redirection pages allow to go back to the documentation index
-# from HTML manuals/snippets page
-static_files = {
- os.path.join (outdir, 'index.html'):
- '''<META HTTP-EQUIV="refresh" content="0;URL=Documentation/index.html">
-<html><body>Redirecting to the documentation index...</body></html>\n''',
- os.path.join (outdir, 'VERSION'):
- package_version + '\n',
- os.path.join ('input', 'lsr', outdir, 'index.html'):
- '''<META HTTP-EQUIV="refresh" content="0;URL=../../index.html">
-<html><body>Redirecting to the documentation index...</body></html>\n'''
- }
-
-for l in langdefs.LANGUAGES:
- static_files[os.path.join ('Documentation', 'user', outdir, l.file_name ('index', '.html'))] = \
- '<META HTTP-EQUIV="refresh" content="0;URL=../' + l.file_name ('index', '.html') + \
- '">\n<html><body>Redirecting to the documentation index...</body></html>\n'
-
-for f, contents in static_files.items ():
- open (f, 'w').write (contents)
-
-sys.stderr.write ("Mirrorring...\n")
-dirs, symlinks, files = mirrortree.walk_tree (
- tree_roots = doc_dirs,
- process_dirs = outdir,
- exclude_dirs = '(^|/)(' + r'|po|out|out-test|.*?[.]t2d|\w*?-root)(/|$)|Documentation/(' + '|'.join ([l.code for l in langdefs.LANGUAGES]) + ')',
- find_files = r'.*?\.(?:midi|html|pdf|png|txt|i?ly|signature|css|zip|xml|mxl)$|VERSION',
- exclude_files = r'lily-[0-9a-f]+.*\.(pdf|txt)')
-
-# actual mirrorring stuff
-html_files = []
-hardlinked_files = []
-for f in files:
- if f.endswith ('.html'):
- html_files.append (f)
- else:
- hardlinked_files.append (f)
-dirs = [re.sub ('/' + outdir, '', d) for d in dirs]
-while outdir in dirs:
- dirs.remove (outdir)
-dirs = list (set (dirs))
-dirs.sort ()
-
-strip_file_name = {}
-strip_re = re.compile (outdir + '/')
-for t in targets:
- out_root = target_pattern % t
- strip_file_name[t] = lambda s: os.path.join (target_pattern % t, (strip_re.sub ('', s)))
- os.mkdir (out_root)
- map (os.mkdir, [os.path.join (out_root, d) for d in dirs])
- for f in hardlinked_files:
- os.link (f, strip_file_name[t] (f))
- for l in symlinks:
- p = mirrortree.new_link_path (os.path.normpath (os.readlink (l)), os.path.dirname (l), strip_re)
- dest = strip_file_name[t] (l)
- if not os.path.exists (dest):
- os.symlink (p, dest)
-
- ## ad-hoc renaming to make xrefs between PDFs work
- os.rename (os.path.join (out_root, 'input/lsr/lilypond-snippets.pdf'),
- os.path.join (out_root, 'Documentation/user/lilypond-snippets.pdf'))
-
-# need this for content negotiation with documentation index
-if 'online' in targets:
- f = open (os.path.join (target_pattern % 'online', 'Documentation/.htaccess'), 'w')
- f.write ('#.htaccess\nDirectoryIndex index\n')
- f.close ()
-
-postprocess_html.build_pages_dict (html_files)
-for t in targets:
- sys.stderr.write ("Processing HTML pages for %s target...\n" % t)
- postprocess_html.process_html_files (
- package_name = package_name,
- package_version = package_version,
- target = t,
- name_filter = strip_file_name[t])
-
NCSB_FILE=`$FCMATCH --verbose "Century Schoolbook L:style=$style" | grep 'file:' | grep -v "\.ttf"`
NCSB_FILE=`echo $NCSB_FILE | sed 's/^.*"\(.*\)".*$/\1/g'`
- NCSB_FILE=`$PYTHON "$srcdir/buildscripts/readlink.py" $NCSB_FILE`
+ NCSB_FILE=`$PYTHON "$srcdir/scripts/aux/readlink.py" $NCSB_FILE`
NCSB_SOURCE_FILES="$NCSB_FILE $NCSB_SOURCE_FILES"
done
else
LILYPOND_WORDS = $(outdir)/lilypond-words.el
LILYPOND_WORDS_DEPENDS =\
$(top-src-dir)/lily/lily-lexer.cc \
- $(buildscript-dir)/lilypond-words.py \
+ $(buildscript-dir)/lilypond-words \
$(top-src-dir)/scm/markup.scm \
$(top-src-dir)/ly/engraver-init.ly
+$(buildscript-dir)/lilypond-words:
+ make -C $(depth)/scripts/build
+
$(LILYPOND_WORDS):
- cd $(top-src-dir) && $(PYTHON) buildscripts/lilypond-words.py --el --dir=$(top-build-dir)/elisp/$(outconfbase)
+ cd $(top-src-dir) && $(buildscript-dir)/lilypond-words --el --dir=$(top-build-dir)/elisp/$(outconfbase)
all: $(LILYPOND_WORDS)
GENERATED_ITELY_FILES = $(IN_ITELY_FILES:%-intro.itely=$(outdir)/%.itely)
$(outdir)/%.itely: %-intro.itely %.snippet-list
- xargs $(PYTHON) $(buildscript-dir)/lys-to-tely.py -f doctitle,texidoc,verbatim --name=$@ --template=$< < $(filter %.snippet-list, $^)
+ xargs $(LYS_TO_TELY) -f doctitle,texidoc,verbatim --name=$@ --template=$< < $(filter %.snippet-list, $^)
$(outdir)/lilypond-snippets.texi: $(GENERATED_ITELY_FILES) $(LY_FILES)
To update this directory, do at top of the source tree
-buildscripts/makelsr.py DIR
+scripts/aux/makelsr.py DIR
where DIR is the directory unpacked from lsr-snippets-doc-DATE tarball
available on http://lsr.dsi.unimi.it/download.
default:
local-WWW-2: $(OUT_HTML_FILES)
- $(PYTHON) $(buildscript-dir)/mass-link.py --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/$(outdir) $(HTML_FILES)
+ $(buildscript-dir)/mass-link --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/$(outdir) $(HTML_FILES)
endif
$(outdir)/%.pdftexi: $(outdir)/%.texi
- $(PYTHON) $(buildscript-dir)/texi-gettext.py $(ISOLANG) $<
+ $(buildscript-dir)/texi-gettext $(ISOLANG) $<
$(outdir)/%.pdf: $(outdir)/%.pdftexi
cd $(outdir); texi2pdf $(TEXI2PDF_FLAGS) $(TEXINFO_PAPERSIZE_OPTION) $(notdir $*).pdftexi
ln -f $< $@
$(XREF_MAPS_DIR)/%.$(ISOLANG).xref-map: $(outdir)/%.texi
- $(PYTHON) $(buildscript-dir)/extract_texi_filenames.py -o $(XREF_MAPS_DIR) $<
+ $(buildscript-dir)/extract_texi_filenames -o $(XREF_MAPS_DIR) $<
$(MASTER_TEXI_FILES): $(ITELY_FILES) $(ITEXI_FILES)
local-WWW-1: $(MASTER_TEXI_FILES) $(PDF_FILES) $(XREF_MAPS_FILES)
local-WWW-2: $(DEEP_HTML_FILES) $(BIG_PAGE_HTML_FILES) $(DOCUMENTATION_LOCALE_TARGET)
- find $(outdir) -name '*.html' | xargs grep -L 'UNTRANSLATED NODE: IGNORE ME' | xargs $(PYTHON) $(buildscript-dir)/html-gettext.py $(ISOLANG)
- find $(outdir) -name '*.html' | xargs grep -L --label="" 'UNTRANSLATED NODE: IGNORE ME' | sed 's!$(outdir)/!!g' | xargs $(PYTHON) $(buildscript-dir)/mass-link.py --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir) $(TELY_FILES:%.tely=%.pdf)
- find $(outdir) \( -name 'lily-*.png' -o -name 'lily-*.ly' \) | sed 's!$(outdir)/!!g' | xargs $(PYTHON) $(buildscript-dir)/mass-link.py hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir)
+ find $(outdir) -name '*.html' | xargs grep -L 'UNTRANSLATED NODE: IGNORE ME' | xargs $(buildscript-dir)/html-gettext $(ISOLANG)
+ find $(outdir) -name '*.html' | xargs grep -L --label="" 'UNTRANSLATED NODE: IGNORE ME' | sed 's!$(outdir)/!!g' | xargs $(buildscript-dir)/mass-link --prepend-suffix .$(ISOLANG) hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir) $(TELY_FILES:%.tely=%.pdf)
+ find $(outdir) \( -name 'lily-*.png' -o -name 'lily-*.ly' \) | sed 's!$(outdir)/!!g' | xargs $(buildscript-dir)/mass-link hard $(outdir) $(top-build-dir)/Documentation/user/$(outdir)
$(DOCUMENTATION_LOCALE_TARGET):
$(MAKE) -C $(depth)/Documentation/po out=www messages
# you do make dist
#
-buildscript-dir = $(src-depth)/buildscripts
+buildscript-dir = $(top-build-dir)/scripts/build/$(outconfbase)
+auxpython-dir = $(src-depth)/python/aux
+auxscript-dir = $(src-depth)/scripts/aux
script-dir = $(src-depth)/scripts
input-dir = $(src-depth)/input
make-dir = $(src-depth)/make
include-flower = $(src-depth)/flower/include
-export PYTHONPATH:=$(buildscript-dir):$(PYTHONPATH)
+export PYTHONPATH:=$(auxpython-dir):$(PYTHONPATH)
LILYPOND_INCLUDES = $(include-flower) $(depth)/flower/$(outdir)
ifeq ($(LILYPOND_EXTERNAL_BINARY),)
# environment settings.
-export PATH:=$(top-build-dir)/lily/$(outconfbase):$(top-build-dir)/buildscripts/$(outconfbase):$(top-build-dir)/scripts/$(outconfbase):$(PATH):
+export PATH:=$(top-build-dir)/lily/$(outconfbase):$(buildscript-dir):$(top-build-dir)/scripts/$(outconfbase):$(PATH):
export LILYPOND_BINARY=$(top-build-dir)/$(outconfbase)/bin/lilypond
else
#texi-html for www only:
LILYPOND_BOOK_FORMAT=$(if $(subst out-www,,$(notdir $(outdir))),texi,texi-html)
LY2DVI = $(LILYPOND_BINARY)
-LYS_TO_TELY = $(buildscript-dir)/lys-to-tely.py
+LYS_TO_TELY = $(buildscript-dir)/lys-to-tely
$(outdir)/collated-files.tely: $(COLLATED_FILES)
- $(PYTHON) $(LYS_TO_TELY) --name=$(outdir)/collated-files.tely --title="$(TITLE)" $^
+ $(LYS_TO_TELY) --name=$(outdir)/collated-files.tely --title="$(TITLE)" $^
$(outdir)/aybabtu.subfonts:
echo $(subst .mf,,$(call src-wildcard,feta-braces-[a-z].mf)) > $@
-$(PE_SCRIPTS): $(top-build-dir)/buildscripts/$(outdir)/gen-emmentaler-scripts
- $(PYTHON) $< --dir=$(outdir)
+$(PE_SCRIPTS): $(buildscript-dir)/gen-emmentaler-scripts
+ $< --dir=$(outdir)
ALL_FONTS = $(FETA_FONTS)
PFB_FILES = $(ALL_FONTS:%=$(outdir)/%.pfb)
$(outdir)/%.otf-gtable \
$(outdir)/%.enc \
$(outdir)/%.pe: $(outdir)/%.log
- $(PYTHON) $(buildscript-dir)/mf-to-table.py \
- --global-lisp=$(outdir)/$(<F:.log=.otf-gtable) \
- --lisp=$(outdir)/$(<F:.log=.lisp) \
- --outdir=$(outdir) \
- --enc $(outdir)/$(<F:.log=.enc) \
- $<
+ $(buildscript-dir)/mf-to-table \
+ --global-lisp=$(outdir)/$(<F:.log=.otf-gtable) \
+ --lisp=$(outdir)/$(<F:.log=.lisp) \
+ --outdir=$(outdir) \
+ --enc $(outdir)/$(<F:.log=.enc) \
+ $<
local-clean:
rm -f mfplain.mem mfplain.log
echo '<fontconfig><dir>'$(shell cd $(outdir); pwd)'</dir></fontconfig>' > $@
$(NCSB_OTFS): $(NCSB_SOURCE_FILES) \
- $(buildscript-dir)/pfx2ttf.fontforge
+ $(auxscript-dir)/pfx2ttf.fontforge
$(foreach i, $(basename $(NCSB_SOURCE_FILES)), \
- $(FONTFORGE) -script $(buildscript-dir)/pfx2ttf.fontforge \
+ $(FONTFORGE) -script $(auxscript-dir)/pfx2ttf.fontforge \
$(i).pfb $(i).afm $(outdir)/ && ) true
# eof
the file may accumulate the list of obsolete translations, which may
help to translate some changed entries and may be safely dropped out.
-* because I never install LilyPond, I (check-out buildscripts/set-lily.sh)
- made these links:
+* because I never install LilyPond, I made these links:
ln -s $LILYPOND_SOURCEDIR/po/out/nl.mo
$PREFIX/usr/share/locale/nl/LC_MESSAGES/lilypond.mo
depth = ..
+SUBDIRS=aux
+
STEPMAKE_TEMPLATES=c python-module install-out po
include $(depth)/make/stepmake.make
--- /dev/null
+depth=../..
+
+EXTRA_DIST_FILES = $(call src-wildcard,*.py)
+
+include $(depth)/make/stepmake.make
+
+default:
+
+local-clean:
+ rm -f *.pyc
--- /dev/null
+#!@PYTHON@
+
+import subprocess
+import re
+import sys
+
+verbose = False
+
+def read_pipe (command):
+ child = subprocess.Popen (command,
+ stdout = subprocess.PIPE,
+ stderr = subprocess.PIPE,
+ shell = True)
+ (output, error) = child.communicate ()
+ code = str (child.wait ())
+ if not child.stdout or child.stdout.close ():
+ print "pipe failed: %(command)s" % locals ()
+ if code != '0':
+ error = code + ' ' + error
+ return (output, error)
+
+revision_re = re.compile ('GIT [Cc]ommittish:\s+([a-f0-9]+)')
+vc_diff_cmd = 'git diff %(color_flag)s %(revision)s HEAD -- %(original)s | cat'
+
+def check_translated_doc (original, translated_file, translated_contents, color=False):
+ m = revision_re.search (translated_contents)
+ if not m:
+ sys.stderr.write ('error: ' + translated_file + \
+ ": no 'GIT committish: <hash>' found.\nPlease check " + \
+ 'the whole file against the original in English, then ' + \
+ 'fill in HEAD committish in the header.\n')
+ sys.exit (1)
+ revision = m.group (1)
+
+ if color:
+ color_flag = '--color'
+ else:
+ color_flag = '--no-color'
+ c = vc_diff_cmd % vars ()
+ if verbose:
+ sys.stderr.write ('running: ' + c)
+ return read_pipe (c)
--- /dev/null
+#!/usr/bin/python
+
+# This module is imported by check_texi_refs.py
+
+references_dict = {
+ 'lilypond': 'ruser',
+ 'lilypond-learning': 'rlearning',
+ 'lilypond-program': 'rprogram',
+ 'lilypond-snippets': 'rlsr',
+ 'music-glossary': 'rglos',
+ 'lilypond-internals': 'rinternals' }
--- /dev/null
+#!@PYTHON@
+
+import re
+import os
+
+def new_link_path (link, dir, r):
+ l = link.split ('/')
+ d = dir.split ('/')
+ i = 0
+ while i < len(d) and i < len(l) and l[i] == '..':
+ if r.match (d[i]):
+ del l[i]
+ else:
+ i += 1
+ return '/'.join ([x for x in l if not r.match (x)])
+
+def walk_tree (tree_roots = [],
+ process_dirs = '.*',
+ exclude_dirs = '',
+ find_files = '.*',
+ exclude_files = ''):
+    """Walk directory trees and return a (dirs, symlinks, files) tuple.
+
+ Arguments:
+ tree_roots=DIRLIST use DIRLIST as tree roots list
+     process_dirs=PATTERN only process files in directories named PATTERN
+     exclude_dirs=PATTERN don't recurse into directories named PATTERN
+ find_files=PATTERN filters files which are hardlinked
+ exclude_files=PATTERN exclude files named PATTERN
+ """
+ find_files_re = re.compile (find_files)
+ exclude_dirs_re = re.compile (exclude_dirs)
+ exclude_files_re = re.compile (exclude_files)
+ process_dirs_re = re.compile (process_dirs)
+
+ dirs_paths = []
+ symlinks_paths = []
+ files_paths = []
+
+ for d in tree_roots:
+ for current_dir, dirs, files in os.walk(d):
+ i = 0
+ while i < len(dirs):
+ if exclude_dirs_re.search (os.path.join (current_dir, dirs[i])):
+ del dirs[i]
+ else:
+ p = os.path.join (current_dir, dirs[i])
+ if os.path.islink (p):
+ symlinks_paths.append (p)
+ i += 1
+ if not process_dirs_re.search (current_dir):
+ continue
+ dirs_paths.append (current_dir)
+ for f in files:
+ if exclude_files_re.match (f):
+ continue
+ p = os.path.join (current_dir, f)
+ if os.path.islink (p):
+ symlinks_paths.append (p)
+ elif find_files_re.match (f):
+ files_paths.append (p)
+ return (dirs_paths, symlinks_paths, files_paths)
--- /dev/null
+#!@PYTHON@
+
+"""
+Postprocess HTML files:
+add footer, tweak links, add language selection menu.
+"""
+import re
+import os
+import time
+import operator
+
+import langdefs
+
+# This is to try to make the docball not too big with almost duplicate files
+# see process_links()
+non_copied_pages = ['Documentation/user/out-www/lilypond-big-page',
+ 'Documentation/user/out-www/lilypond-internals-big-page',
+ 'Documentation/user/out-www/lilypond-learning-big-page',
+ 'Documentation/user/out-www/lilypond-program-big-page',
+ 'Documentation/user/out-www/music-glossary-big-page',
+ 'out-www/examples',
+ 'Documentation/topdocs',
+ 'Documentation/bibliography',
+ 'Documentation/out-www/THANKS',
+ 'Documentation/out-www/DEDICATION',
+ 'Documentation/out-www/devel',
+ 'input/']
+
+def _doc (s):
+ return s
+
+header = r"""
+"""
+
+footer = '''
+<div class="footer">
+<p class="footer_version">
+%(footer_name_version)s
+</p>
+<p class="footer_report">
+%(footer_report_links)s
+</p>
+</div>
+'''
+footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).')
+# ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process
+footer_report_links = _doc ('Your <a href="%(suggest_Docs_url)s">suggestions for the documentation</a> are welcome, please report errors to our <a href="%(mail_address_url)s">bug list</a>.')
+
+
+mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs'
+suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding'
+
+header_tag = '<!-- header_tag -->'
+header_tag_re = re.compile (header_tag)
+
+footer_tag = '<!-- footer_tag -->'
+footer_tag_re = re.compile (footer_tag)
+
+lang_available = _doc ("Other languages: %s.")
+browser_lang = _doc ('About <A HREF="%s">automatic language selection</A>.')
+browser_language_url = "/web/about/browser-language"
+
+LANGUAGES_TEMPLATE = '''
+<p id="languages">
+ %(language_available)s
+ <br/>
+ %(browser_language)s
+</p>
+'''
+
+
+html_re = re.compile ('(.*?)(?:[.]([^/.]*))?[.]html$')
+pages_dict = {}
+
+def build_pages_dict (filelist):
+ """Build dictionary of available translations of each page"""
+ global pages_dict
+ for f in filelist:
+ m = html_re.match (f)
+ if m:
+ g = m.groups()
+ if len (g) <= 1 or g[1] == None:
+ e = ''
+ else:
+ e = g[1]
+ if not g[0] in pages_dict:
+ pages_dict[g[0]] = [e]
+ else:
+ pages_dict[g[0]].append (e)
+
+def source_links_replace (m, source_val):
+ return 'href="' + os.path.join (source_val, m.group (1)) + '"'
+
+splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|\
+Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|\
+lilypond-learning))/')
+
+snippets_ref_re = re.compile (r'href="(\.\./)?lilypond-snippets')
+user_ref_re = re.compile ('href="(?:\.\./)?lilypond\
+(-internals|-learning|-program|(?!-snippets))')
+
+docindex_link_re = re.compile (r'href="index.html"')
+
+
+## Windows does not support symlinks.
+# This function avoids creating symlinks for splitted HTML manuals
+# Get rid of symlinks in GNUmakefile.in (local-WWW-post)
+# this also fixes missing PNGs only present in translated docs
+def hack_urls (s, prefix):
+ if splitted_docs_re.match (prefix):
+ s = re.sub ('(href|src)="(../lily-.*?|.*?[.]png)"', '\\1="../\\2"', s)
+
+ # fix xrefs between documents in different directories ad hoc
+ if 'user/out-www/lilypond' in prefix:
+ s = snippets_ref_re.sub ('href="source/input/lsr/lilypond-snippets', s)
+ elif 'input/lsr' in prefix:
+ s = user_ref_re.sub ('href="source/Documentation/user/lilypond\\1', s)
+
+ # we also need to replace in the lsr, which is already processed above!
+ if 'input/' in prefix or 'Documentation/topdocs' in prefix:
+ # fix the link from the regtest, lsr and topdoc pages to the doc index
+ # (rewrite prefix to obtain the relative path of the doc index page)
+ rel_link = re.sub (r'out-www/.*$', '', prefix)
+ rel_link = re.sub (r'[^/]*/', '../', rel_link)
+ if 'input/regression' in prefix:
+ indexfile = "Documentation/devel"
+ else:
+ indexfile = "index"
+ s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s)
+
+ source_path = os.path.join (os.path.dirname (prefix), 'source')
+ if not os.path.islink (source_path):
+ return s
+ source_val = os.readlink (source_path)
+ return re.sub ('href="source/(.*?)"', lambda m: source_links_replace (m, source_val), s)
+
+body_tag_re = re.compile ('(?i)<body([^>]*)>')
+html_tag_re = re.compile ('(?i)<html>')
+doctype_re = re.compile ('(?i)<!DOCTYPE')
+doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
+css_re = re.compile ('(?i)<link rel="stylesheet" type="text/css" ([^>]*)href="[^">]*?lilypond.*\.css"([^>]*)>')
+end_head_tag_re = re.compile ('(?i)</head>')
+css_link = """ <link rel="stylesheet" type="text/css" title="Patrick McCarty's design" href="%(rel)sDocumentation/lilypond-mccarty.css">
+ <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond.css" title="Andrew Hawryluk's design">
+ <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond-blue.css" title="Kurt Kroon's blue design">
+ <!--[if lte IE 7]>
+ <link href="%(rel)sDocumentation/lilypond-ie-fixes.css" rel="stylesheet" type="text/css">
+ <![endif]-->
+"""
+
+
+def add_header (s, prefix):
+ """Add header (<body>, doctype and CSS)"""
+ if header_tag_re.search (s) == None:
+ body = '<body\\1>'
+ (s, n) = body_tag_re.subn (body + header, s, 1)
+ if not n:
+ (s, n) = html_tag_re.subn ('<html>' + header, s, 1)
+ if not n:
+ s = header + s
+
+ s = header_tag + '\n' + s
+
+ if doctype_re.search (s) == None:
+ s = doctype + s
+
+ if css_re.search (s) == None:
+ depth = (prefix.count ('/') - 1) * '../'
+ s = end_head_tag_re.sub ((css_link % {'rel': depth}) + '</head>', s)
+ return s
+
+title_tag_re = re.compile ('.*?<title>(.*?)</title>', re.DOTALL)
+AT_web_title_re = re.compile ('@WEB-TITLE@')
+
+def add_title (s):
+ # urg
+ # maybe find first node?
+ fallback_web_title = '-- --'
+ m = title_tag_re.match (s)
+ if m:
+ fallback_web_title = m.group (1)
+ s = AT_web_title_re.sub (fallback_web_title, s)
+ return s
+
+footer_insert_re = re.compile ('<!--\s*FOOTER\s*-->')
+end_body_re = re.compile ('(?i)</body>')
+end_html_re = re.compile ('(?i)</html>')
+
+def add_footer (s, footer_text):
+ """add footer"""
+ (s, n) = footer_insert_re.subn (footer_text + '\n' + '<!-- FOOTER -->', s, 1)
+ if not n:
+ (s, n) = end_body_re.subn (footer_text + '\n' + '</body>', s, 1)
+ if not n:
+ (s, n) = end_html_re.subn (footer_text + '\n' + '</html>', s, 1)
+ if not n:
+ s += footer_text + '\n'
+ return s
+
+def find_translations (prefix, lang_ext):
+ """find available translations of a page"""
+ available = []
+ missing = []
+ for l in langdefs.LANGUAGES:
+ e = l.webext
+ if lang_ext != e:
+ if e in pages_dict[prefix]:
+ available.append (l)
+ elif lang_ext == '' and l.enabled and reduce (operator.and_,
+ [not prefix.startswith (s)
+ for s in non_copied_pages]):
+ # English version of missing translated pages will be written
+ missing.append (e)
+ return available, missing
+
+online_links_re = re.compile ('''(href|src)=['"]\
+((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\
+([.]html)(#[^"']*|)['"]''')
+offline_links_re = re.compile ('href=[\'"]\
+((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
+big_page_name_re = re.compile ('''(.+?)-big-page''')
+
+def process_i18n_big_page_links (match, prefix, lang_ext):
+ big_page_name = big_page_name_re.match (match.group (1))
+ if big_page_name:
+ destination_path = os.path.normpath (os.path.join (os.path.dirname (prefix),
+ big_page_name.group (0)))
+ if not lang_ext in pages_dict[destination_path]:
+ return match.group (0)
+ return 'href="' + match.group (1) + '.' + lang_ext \
+ + match.group (2) + match.group (3) + '"'
+
+def process_links (s, prefix, lang_ext, file_name, missing, target):
+ page_flavors = {}
+ if target == 'online':
+ # Strip .html, suffix for auto language selection (content
+ # negotiation). The menu must keep the full extension, so do
+ # this before adding the menu.
+ page_flavors[file_name] = \
+ [lang_ext, online_links_re.sub ('\\1="\\2\\4"', s)]
+ elif target == 'offline':
+ # in LANG doc index: don't rewrite .html suffixes
+ # as not all .LANG.html pages exist;
+ # the doc index should be translated and contain links with the right suffixes
+ if prefix == 'Documentation/out-www/index':
+ page_flavors[file_name] = [lang_ext, s]
+ elif lang_ext == '':
+ page_flavors[file_name] = [lang_ext, s]
+ for e in missing:
+ page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = \
+ [e, offline_links_re.sub ('href="\\1.' + e + '\\2\\3"', s)]
+ else:
+ # For saving bandwidth and disk space, we don't duplicate big pages
+ # in English, so we must process translated big pages links differently.
+ if 'big-page' in prefix:
+ page_flavors[file_name] = \
+ [lang_ext,
+ offline_links_re.sub \
+ (lambda match: process_i18n_big_page_links (match, prefix, lang_ext),
+ s)]
+ else:
+ page_flavors[file_name] = \
+ [lang_ext,
+ offline_links_re.sub ('href="\\1.' + lang_ext + '\\2\\3"', s)]
+ return page_flavors
+
+def add_menu (page_flavors, prefix, available, target, translation):
+ for k in page_flavors:
+ language_menu = ''
+ languages = ''
+ if page_flavors[k][0] != '':
+ t = translation[page_flavors[k][0]]
+ else:
+ t = _doc
+ for lang in available:
+ lang_file = lang.file_name (os.path.basename (prefix), '.html')
+ if language_menu != '':
+ language_menu += ', '
+ language_menu += '<a href="%s">%s</a>' % (lang_file, t (lang.name))
+ if target == 'offline':
+ browser_language = ''
+ elif target == 'online':
+ browser_language = t (browser_lang) % browser_language_url
+ if language_menu:
+ language_available = t (lang_available) % language_menu
+ languages = LANGUAGES_TEMPLATE % vars ()
+ page_flavors[k][1] = add_footer (page_flavors[k][1], languages)
+ return page_flavors
+
+
+def process_html_files (package_name = '',
+ package_version = '',
+ target = 'offline',
+ name_filter = lambda s: s):
+ """Add header, footer and tweak links to a number of HTML files
+
+ Arguments:
+ package_name=NAME set package_name to NAME
+ package_version=VERSION set package version to VERSION
+     target=offline|online  set page processing depending on the target
+ offline is for reading HTML pages locally
+ online is for hosting the HTML pages on a website with content
+ negotiation
+ name_filter a HTML file name filter
+ """
+ translation = langdefs.translation
+ localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
+
+ if "http://" in mail_address:
+ mail_address_url = mail_address
+ else:
+ mail_address_url= 'mailto:' + mail_address
+
+ versiontup = package_version.split ('.')
+ branch_str = _doc ('stable-branch')
+ if int (versiontup[1]) % 2:
+ branch_str = _doc ('development-branch')
+
+ # Initialize dictionaries for string formatting
+ subst = {}
+ subst[''] = dict ([i for i in globals ().items() if type (i[1]) is str])
+ subst[''].update (dict ([i for i in locals ().items() if type (i[1]) is str]))
+ for l in translation:
+ e = langdefs.LANGDICT[l].webext
+ if e:
+ subst[e] = {}
+ for name in subst['']:
+ subst[e][name] = translation[l] (subst[''][name])
+ # Do deeper string formatting as early as possible,
+ # so only one '%' formatting pass is needed later
+ for e in subst:
+ subst[e]['footer_name_version'] = subst[e]['footer_name_version'] % subst[e]
+ subst[e]['footer_report_links'] = subst[e]['footer_report_links'] % subst[e]
+
+ for prefix, ext_list in pages_dict.items ():
+ for lang_ext in ext_list:
+ file_name = langdefs.lang_file_name (prefix, lang_ext, '.html')
+ in_f = open (file_name)
+ s = in_f.read()
+ in_f.close()
+
+ s = s.replace ('%', '%%')
+ s = hack_urls (s, prefix)
+ s = add_header (s, prefix)
+
+ ### add footer
+ if footer_tag_re.search (s) == None:
+ s = add_footer (s, footer_tag + footer)
+
+ available, missing = find_translations (prefix, lang_ext)
+ page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target)
+ # Add menu after stripping: must not have autoselection for language menu.
+ page_flavors = add_menu (page_flavors, prefix, available, target, translation)
+ for k in page_flavors:
+ page_flavors[k][1] = page_flavors[k][1] % subst[page_flavors[k][0]]
+ out_f = open (name_filter (k), 'w')
+ out_f.write (page_flavors[k][1])
+ out_f.close()
+ # if the page is translated, a .en.html symlink is necessary for content negotiation
+ if target == 'online' and ext_list != ['']:
+ os.symlink (os.path.basename (prefix) + '.html', name_filter (prefix + '.en.html'))
depth = ..
+SUBDIRS=aux build
+
SEXECUTABLES=convert-ly lilypond-book abc2ly etf2ly midi2ly lilypond-invoke-editor musicxml2ly lilysong lilymidi
STEPMAKE_TEMPLATES=script help2man po
--- /dev/null
+depth=../..
+
+EXTRA_DIST_FILES = $(call src-wildcard,*.sh) $(call src-wildcard,*.py)
+EXTRA_DIST_FILES += pfx2ttf.fontforge
+
+include $(depth)/make/stepmake.make
+
+default:
--- /dev/null
+#!/bin/sh
+
+# Build LilyPond with gcc coverage instrumentation, run the regression
+# tests under it, then post-process the gcov data into
+# out/coverage-results/ with the coverage.py helper.
+#
+# Usage: build-coverage.sh [--fresh]
+#   --fresh   force a reconfigure and full rebuild
+
+# Fix: `==' is a bashism; POSIX test(1) only accepts `=', and this
+# script declares #!/bin/sh (may be dash or another POSIX shell).
+if test "$1" = "--fresh"; then
+  fresh=yes
+fi
+
+if test ! -f config-cov.make; then
+  fresh=yes
+fi
+
+if test "$fresh" = "yes";
+then
+  ./configure --enable-config=cov --disable-optimising \
+   && make conf=cov -j2 clean \
+   && perl -i~ -pe 's/-pipe /-fprofile-arcs -ftest-coverage -pipe /g' config-cov.make \
+   && perl -i~ -pe 's/ -ldl / -lgcov -ldl /g' config-cov.make
+else
+  # Reset previously collected execution counts only.
+  # Fix: POSIX find requires an explicit path operand; bare `find' is
+  # a GNU extension.
+  find . -name '*.gcda' -exec rm '{}' ';'
+fi
+
+# midi2ly is touched so the build does not regenerate it under coverage.
+mkdir -p scripts/out-cov/
+touch scripts/out-cov/midi2ly scripts/out-cov/midi2ly.1
+make conf=cov -j2 && \
+  make conf=cov test-clean OUT_TEST=testcov LILYPOND_JOBS= && \
+  make conf=cov test OUT_TEST=testcov LILYPOND_JOBS='-dtrace-scheme-coverage '
+
+if test "$?" != "0"; then
+  tail -100 out-cov/test-run.log
+  exit 1
+fi
+
+depth=../..
+resultdir=out/coverage-results
+
+rm -rf $resultdir
+mkdir $resultdir
+cd $resultdir
+
+# Hard-link sources and coverage output into one directory so gcov can
+# resolve everything locally.
+ln $depth/lily/* .
+ln $depth/scm/*.scm .
+mv $depth/input/regression/out-testcov/*.scm.cov .
+ln $depth/ly/*.ly .
+ln $depth/lily/out-cov/*[ch] .
+mkdir include
+ln $depth/lily/include/* include/
+ln $depth/flower/include/* include/
+for a in *[cl] *.yy
+do
+  gcov -o $depth/lily/out-cov/ -p $a > $a.gcov-summary
+done
+
+$depth/scripts/aux/coverage.py --uncovered *.cc > uncovered.txt
+$depth/scripts/aux/coverage.py --hotspots *.cc > hotspots.txt
+$depth/scripts/aux/coverage.py --summary *.cc > summary.txt
+$depth/scripts/aux/coverage.py --uncovered *.scm > uncovered-scheme.txt
+
+head -20 summary.txt
+
+cat <<EOF
+results in
+
+  out/coverage-results/summary.txt
+  out/coverage-results/uncovered.txt
+  out/coverage-results/uncovered-scheme.txt
+  out/coverage-results/hotspots.txt
+
+EOF
--- /dev/null
+#!/bin/sh
+
+if test "$1" == "--fresh"; then
+ fresh=yes
+fi
+
+if test ! -f config-prof.make; then
+ fresh=yes
+fi
+
+if test "$fresh" = "yes";
+then
+ ./configure --enable-config=prof --enable-optimising \
+ && perl -i~ -pe 's/-pipe /-pg -pipe /g' config-prof.make \
+ && perl -i~ -pe 's/ -ldl / -pg -ldl /g' config-prof.make
+fi
+
+make conf=prof -j2
+
+if test "$?" != "0"; then
+ exit 2
+fi
+
+depth=../..
+resultdir=out/profile-results
+
+rm -rf $resultdir
+mkdir $resultdir
+cd $resultdir
+
+
+cat > long-score.ly << EOF
+\version "2.10.0"
+foo = \new Staff \new Voice \repeat unfold 50 \relative { c4 d8[ d16( e]~ e16[ e e) f] g8 }
+\score {
+ \new ChoirStaff <<
+ \foo \foo \foo \foo
+ \foo \foo \foo \foo
+
+ >>
+ \midi {}
+ \layout {}
+}
+EOF
+
+rm gmon.sum
+
+exe=$depth/out-prof/bin/lilypond
+
+## todo: figure out representative sample.
+files="wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 mozart-hrn-3 mozart-hrn-3 long-score"
+
+
+
+$exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
+ -I $depth/input/mutopia/W.A.Mozart/ \
+ $files
+
+
+for a in *.profile; do
+ echo $a
+ cat $a
+done
+
+echo 'running gprof'
+gprof $exe > profile
+
+exit 0
+
+
+## gprof -s takes forever.
+for a in seq 1 3; do
+ for f in $files ; do
+ $exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
+ -I $depth/input/mutopia/W.A.Mozart/ \
+ $f
+
+ echo 'running gprof'
+ if test -f gmon.sum ; then
+ gprof -s $exe gmon.out gmon.sum
+ else
+ mv gmon.out gmon.sum
+ fi
+ done
+done
+
+gprof $exe gmon.sum > profile
--- /dev/null
+#!/usr/bin/env python
+
+"""
+check_texi_refs.py
+Interactive Texinfo cross-references checking and fixing tool
+
+"""
+
+
+import sys
+import re
+import os
+import optparse
+import imp
+
+# Subdirectory searched for generated copies of documents.
+outdir = 'out-www'
+
+# Progress messages go to stderr; interactive output goes to stdout.
+log = sys.stderr
+stdout = sys.stdout
+
+file_not_found = 'file not found in include path'
+
+warn_not_fixed = '*** Warning: this broken x-ref has not been fixed!\n'
+
+opt_parser = optparse.OptionParser (usage='check_texi_refs.py [OPTION]... FILE',
+                                    description='''Check and fix \
+cross-references in a collection of Texinfo
+documents heavily cross-referenced each other.
+''')
+
+opt_parser.add_option ('-a', '--auto-fix',
+                       help="Automatically fix cross-references whenever \
+it is possible",
+                       action='store_true',
+                       dest='auto_fix',
+                       default=False)
+
+opt_parser.add_option ('-b', '--batch',
+                       help="Do not run interactively",
+                       action='store_false',
+                       dest='interactive',
+                       default=True)
+
+opt_parser.add_option ('-c', '--check-comments',
+                       help="Also check commented out x-refs",
+                       action='store_true',
+                       dest='check_comments',
+                       default=False)
+
+opt_parser.add_option ('-p', '--check-punctuation',
+                       help="Check punctuation after x-refs",
+                       action='store_true',
+                       dest='check_punctuation',
+                       default=False)
+
+opt_parser.add_option ("-I", '--include', help="add DIR to include path",
+                       metavar="DIR",
+                       action='append', dest='include_path',
+                       default=[os.path.abspath (os.getcwd ())])
+
+(options, files) = opt_parser.parse_args ()
+
+# Raised when an interactive prompt exhausts its retry limit.
+class InteractionError (Exception):
+    pass
+
+
+# The first positional argument is a Python file defining
+# references_dict (e.g. manuals_definitions.py); it is loaded as a module.
+manuals_defs = imp.load_source ('manuals_defs', files[0])
+manuals = {}
+
+def find_file (name, prior_directory='.'):
+    """Locate NAME, trying prior_directory and its outdir subdir first,
+then each include-path directory, then each include-path outdir subdir.
+
+Returns the path found; raises EnvironmentError with strerror set to
+file_not_found when nothing matches."""
+    p = os.path.join (prior_directory, name)
+    out_p = os.path.join (prior_directory, outdir, name)
+    if os.path.isfile (p):
+        return p
+    elif os.path.isfile (out_p):
+        return out_p
+
+    # looking for file in include_path
+    for d in options.include_path:
+        p = os.path.join (d, name)
+        if os.path.isfile (p):
+            return p
+
+    # file not found in include_path: looking in `outdir' subdirs
+    for d in options.include_path:
+        p = os.path.join (d, outdir, name)
+        if os.path.isfile (p):
+            return p
+
+    raise EnvironmentError (1, file_not_found, name)
+
+
+exit_code = 0
+
+def set_exit_code (n):
+ global exit_code
+ exit_code = max (exit_code, n)
+
+
+# Define the two user-interaction hooks.  In batch mode (-b) they become
+# no-ops that return the default / None, so the same call sites work in
+# both modes.
+if options.interactive:
+    try:
+        import readline
+    except:
+        pass
+
+    def yes_prompt (question, default=False, retries=3):
+        # Ask a yes/no question; empty input or exhausted retries give
+        # the default.
+        d = {True: 'y', False: 'n'}.get (default, False)
+        while retries:
+            a = raw_input ('%s [default: %s]' % (question, d) + '\n')
+            if a.lower ().startswith ('y'):
+                return True
+            if a.lower ().startswith ('n'):
+                return False
+            if a == '' or retries < 0:
+                return default
+            stdout.write ("Please answer yes or no.\n")
+            retries -= 1
+
+    def search_prompt ():
+        """Prompt user for a substring to look for in node names.
+
+If user input is empty or matches no node name, return None,
+otherwise return a list of (manual, node name, file) tuples.
+
+"""
+        substring = raw_input ("Enter a substring to search in node names \
+(press Enter to skip this x-ref):\n")
+        if not substring:
+            return None
+        substring = substring.lower ()
+        matches = []
+        for k in manuals:
+            matches += [(k, node, manuals[k]['nodes'][node][0])
+                        for node in manuals[k]['nodes']
+                        if substring in node.lower ()]
+        return matches
+
+else:
+    def yes_prompt (question, default=False, retries=3):
+        return default
+
+    def search_prompt ():
+        return None
+
+
+# Matches @ref/@ruser/@rlearning/@rprogram/@rglos and their *named
+# variants; captures the node name (`ref' or `refname'), the optional
+# display text (`display') and the character following the macro (`last').
+ref_re = re.compile \
+    ('@(ref|ruser|rlearning|rprogram|rglos)(?:\\{(?P<ref>[^,\\\\\\}]+?)|\
+named\\{(?P<refname>[^,\\\\]+?),(?P<display>[^,\\\\\\}]+?))\\}(?P<last>.)',
+     re.DOTALL)
+# Matches @node and @include lines at the start of a line.
+node_include_re = re.compile (r'(?m)^@(node|include)\s+(.+?)$')
+
+whitespace_re = re.compile (r'\s+')
+line_start_re = re.compile ('(?m)^')
+
+def which_line (index, newline_indices):
+    """Calculate line number of a given string index
+
+Return line number of string index index, where
+newline_indices is an ordered iterable of all newline indices.
+"""
+    # Binary search for the last line start <= index.
+    # NOTE(review): with fewer than 2 entries in newline_indices, the
+    # `inf + 1 != sup' loop condition never becomes false -- confirm
+    # callers always pass a document with at least two lines.
+    inf = 0
+    sup = len (newline_indices) - 1
+    n = len (newline_indices)
+    while inf + 1 != sup:
+        m = (inf + sup) / 2
+        if index >= newline_indices [m]:
+            inf = m
+        else:
+            sup = m
+    return inf + 1
+
+
+# Matches @c / @comment single-line comments and @ignore blocks,
+# skipping escaped @@c thanks to the lookbehind.
+comments_re = re.compile ('(?<!@)(@c(?:omment)? \
+.*?\\n|^@ignore\\n.*?\\n@end ignore\\n)', re.M | re.S)
+
+def calc_comments_boundaries (texinfo_doc):
+    # Return (start, end) string indices of every comment span.
+    return [(m.start (), m.end ()) for m in comments_re.finditer (texinfo_doc)]
+
+
+def is_commented_out (start, end, comments_boundaries):
+    # True if the [start, end) span lies inside one of the (sorted)
+    # comment boundary pairs; stop early once boundaries start past end.
+    for k in range (len (comments_boundaries)):
+        if (start > comments_boundaries[k][0]
+            and end <= comments_boundaries[k][1]):
+            return True
+        elif end <= comments_boundaries[k][0]:
+            return False
+    return False
+
+
+def read_file (f, d):
+    """Read Texinfo file F into dictionary D (see read_manual for keys),
+recording node names and recursing into @include'd files found on the
+include path."""
+    s = open (f).read ()
+    # NOTE(review): `base' is computed but never used below.
+    base = os.path.basename (f)
+    dir = os.path.dirname (f)
+
+    d['contents'][f] = s
+
+    d['newline_indices'][f] = [m.end () for m in line_start_re.finditer (s)]
+    # With --check-comments, record no comment spans so commented-out
+    # x-refs are checked like ordinary ones.
+    if options.check_comments:
+        d['comments_boundaries'][f] = []
+    else:
+        d['comments_boundaries'][f] = calc_comments_boundaries (s)
+
+    for m in node_include_re.finditer (s):
+        if m.group (1) == 'node':
+            line = which_line (m.start (), d['newline_indices'][f])
+            d['nodes'][m.group (2)] = (f, line)
+
+        elif m.group (1) == 'include':
+            try:
+                p = find_file (m.group (2), dir)
+            except EnvironmentError, (errno, strerror):
+                # Missing includes are skipped silently; anything else
+                # is a real error.
+                if strerror == file_not_found:
+                    continue
+                else:
+                    raise
+            read_file (p, d)
+
+
+def read_manual (name):
+    """Look for all node names and cross-references in a Texinfo document
+
+Return a (manual, dictionary) tuple where manual is the cross-reference
+macro name defined by references_dict[name], and dictionary
+has the following keys:
+
+  'nodes' is a dictionary of `node name':(file name, line number),
+
+  'contents' is a dictionary of file:`full file contents',
+
+  'newline_indices' is a dictionary of
+file:[list of beginning-of-line string indices],
+
+  'comments_boundaries' is a list of (start, end) tuples,
+which contain string indices of start and end of each comment.
+
+Included files that can be found in the include path are processed too.
+
+"""
+    d = {}
+    d['nodes'] = {}
+    d['contents'] = {}
+    d['newline_indices'] = {}
+    d['comments_boundaries'] = {}
+    manual = manuals_defs.references_dict.get (name, '')
+    # Try NAME.tely first, then NAME.texi; a manual missing in both
+    # forms is reported but not fatal.
+    try:
+        f = find_file (name + '.tely')
+    except EnvironmentError, (errno, strerror):
+        if not strerror == file_not_found:
+            raise
+        else:
+            try:
+                f = find_file (name + '.texi')
+            except EnvironmentError, (errno, strerror):
+                if strerror == file_not_found:
+                    sys.stderr.write (name + '.{texi,tely}: ' +
+                                      file_not_found + '\n')
+                    return (manual, d)
+                else:
+                    raise
+
+    log.write ("Processing manual %s (%s)\n" % (f, manual))
+    read_file (f, d)
+    return (manual, d)
+
+
+log.write ("Reading files...\n")
+
+# manual macro name -> dictionary described in read_manual's docstring.
+manuals = dict ([read_manual (name)
+                 for name in manuals_defs.references_dict.keys ()])
+
+# Fixes applied so far, as (old_type, old_ref, new_type, new_ref)
+# tuples, so the same broken reference can be fixed again automatically.
+ref_fixes = set ()
+bad_refs_count = 0
+fixes_count = 0
+
+def add_fix (old_type, old_ref, new_type, new_ref):
+    # Remember a fix for later lookup by lookup_fix().
+    ref_fixes.add ((old_type, old_ref, new_type, new_ref))
+
+
+def lookup_fix (r):
+    # Return all (new_type, new_ref) pairs previously recorded for
+    # broken reference name R.
+    found = []
+    for (old_type, old_ref, new_type, new_ref) in ref_fixes:
+        if r == old_ref:
+            found.append ((new_type, new_ref))
+    return found
+
+
+def preserve_linebreak (text, linebroken):
+ if linebroken:
+ if ' ' in text:
+ text = text.replace (' ', '\n', 1)
+ n = ''
+ else:
+ n = '\n'
+ else:
+ n = ''
+ return (text, n)
+
+
+def choose_in_numbered_list (message, string_list, sep=' ', retries=3):
+    """Display MESSAGE followed by a numbered menu of string_list and
+read the user's choice, either by index number or by a unique substring.
+
+Returns the chosen string, or '' if the user pressed Enter to skip.
+Raises InteractionError after `retries' invalid answers."""
+    # De-duplicate and drop empty entries before numbering.
+    S = set (string_list)
+    S.discard ('')
+    string_list = list (S)
+    numbered_list = sep.join ([str (j + 1) + '. ' + string_list[j]
+                               for j in range (len (string_list))]) + '\n'
+    t = retries
+    while t > 0:
+        value = ''
+        stdout.write (message +
+                      "(press Enter to discard and start a new search)\n")
+        input = raw_input (numbered_list)
+        if not input:
+            return ''
+        try:
+            value = string_list[int (input) - 1]
+        except IndexError:
+            stdout.write ("Error: index number out of range\n")
+        except ValueError:
+            # Not a number: treat the input as a substring and accept it
+            # only when it matches exactly one entry.
+            matches = [input in v for v in string_list]
+            n = matches.count (True)
+            if n == 0:
+                stdout.write ("Error: input matches no item in the list\n")
+            elif n > 1:
+                stdout.write ("Error: ambiguous input (matches several items \
+in the list)\n")
+            else:
+                value = string_list[matches.index (True)]
+        if value:
+            return value
+        t -= 1
+    raise InteractionError ("%d retries limit exceeded" % retries)
+
+refs_count = 0
+
+def check_ref (manual, file, m):
+ global fixes_count, bad_refs_count, refs_count
+ refs_count += 1
+ bad_ref = False
+ fixed = True
+ type = m.group (1)
+ original_name = m.group ('ref') or m.group ('refname')
+ name = whitespace_re.sub (' ', original_name). strip ()
+ newline_indices = manuals[manual]['newline_indices'][file]
+ line = which_line (m.start (), newline_indices)
+ linebroken = '\n' in original_name
+ original_display_name = m.group ('display')
+ next_char = m.group ('last')
+ if original_display_name: # the xref has an explicit display name
+ display_linebroken = '\n' in original_display_name
+ display_name = whitespace_re.sub (' ', original_display_name). strip ()
+ commented_out = is_commented_out \
+ (m.start (), m.end (), manuals[manual]['comments_boundaries'][file])
+ useful_fix = not outdir in file
+
+ # check puncuation after x-ref
+ if options.check_punctuation and not next_char in '.,;:!?':
+ stdout.write ("Warning: %s: %d: `%s': x-ref \
+not followed by punctuation\n" % (file, line, name))
+
+ # validate xref
+ explicit_type = type
+ new_name = name
+
+ if type != 'ref' and type == manual and not commented_out:
+ if useful_fix:
+ fixed = False
+ bad_ref = True
+ stdout.write ("\n%s: %d: `%s': external %s x-ref should be internal\n"
+ % (file, line, name, type))
+ if options.auto_fix or yes_prompt ("Fix this?"):
+ type = 'ref'
+
+ if type == 'ref':
+ explicit_type = manual
+
+ if not name in manuals[explicit_type]['nodes'] and not commented_out:
+ bad_ref = True
+ fixed = False
+ stdout.write ('\n')
+ if type == 'ref':
+ stdout.write ("\e[1;31m%s: %d: `%s': wrong internal x-ref\e[0m\n"
+ % (file, line, name))
+ else:
+ stdout.write ("\e[1;31m%s: %d: `%s': wrong external `%s' x-ref\e[0m\n"
+ % (file, line, name, type))
+ # print context
+ stdout.write ('--\n' + manuals[manual]['contents'][file]
+ [newline_indices[max (0, line - 2)]:
+ newline_indices[min (line + 3,
+ len (newline_indices) - 1)]] +
+ '--\n')
+
+ # try to find the reference in other manuals
+ found = []
+ for k in [k for k in manuals if k != explicit_type]:
+ if name in manuals[k]['nodes']:
+ if k == manual:
+ found = ['ref']
+ stdout.write ("\e[1;32m found as internal x-ref\e[0m\n")
+ break
+ else:
+ found.append (k)
+ stdout.write ("\e[1;32m found as `%s' x-ref\e[0m\n" % k)
+
+ if (len (found) == 1
+ and (options.auto_fix or yes_prompt ("Fix this x-ref?"))):
+ add_fix (type, name, found[0], name)
+ type = found[0]
+ fixed = True
+
+ elif len (found) > 1 and useful_fix:
+ if options.interactive or options.auto_fix:
+ stdout.write ("* Several manuals contain this node name, \
+cannot determine manual automatically.\n")
+ if options.interactive:
+ t = choose_in_numbered_list ("Choose manual for this x-ref by \
+index number or beginning of name:\n", found)
+ if t:
+ add_fix (type, name, t, name)
+ type = t
+ fixed = True
+
+ if not fixed:
+ # try to find a fix already made
+ found = lookup_fix (name)
+
+ if len (found) == 1:
+ stdout.write ("Found one previous fix: %s `%s'\n" % found[0])
+ if options.auto_fix or yes_prompt ("Apply this fix?"):
+ type, new_name = found[0]
+ fixed = True
+
+ elif len (found) > 1:
+ if options.interactive or options.auto_fix:
+ stdout.write ("* Several previous fixes match \
+this node name, cannot fix automatically.\n")
+ if options.interactive:
+ concatened = choose_in_numbered_list ("Choose new manual \
+and x-ref by index number or beginning of name:\n", [''.join ([i[0], ' ', i[1]])
+ for i in found],
+ sep='\n')
+ if concatened:
+ type, new_name = concatenated.split (' ', 1)
+ fixed = True
+
+ if not fixed:
+ # all previous automatic fixing attempts failed,
+ # ask user for substring to look in node names
+ while True:
+ node_list = search_prompt ()
+ if node_list == None:
+ if options.interactive:
+ stdout.write (warn_not_fixed)
+ break
+ elif not node_list:
+ stdout.write ("No matched node names.\n")
+ else:
+ concatenated = choose_in_numbered_list ("Choose \
+node name and manual for this x-ref by index number or beginning of name:\n", \
+ [' '.join ([i[0], i[1], '(in %s)' % i[2]])
+ for i in node_list],
+ sep='\n')
+ if concatenated:
+ t, z = concatenated.split (' ', 1)
+ new_name = z.split (' (in ', 1)[0]
+ add_fix (type, name, t, new_name)
+ type = t
+ fixed = True
+ break
+
+ if fixed and type == manual:
+ type = 'ref'
+ bad_refs_count += int (bad_ref)
+ if bad_ref and not useful_fix:
+ stdout.write ("*** Warning: this file is automatically generated, \
+please fix the code source instead of generated documentation.\n")
+
+ # compute returned string
+ if new_name == name:
+ if bad_ref and (options.interactive or options.auto_fix):
+ # only the type of the ref was fixed
+ fixes_count += int (fixed)
+ if original_display_name:
+ return ('@%snamed{%s,%s}' % (type, original_name, original_display_name)) + next_char
+ else:
+ return ('@%s{%s}' % (type, original_name)) + next_char
+ else:
+ fixes_count += int (fixed)
+ (ref, n) = preserve_linebreak (new_name, linebroken)
+ if original_display_name:
+ if bad_ref:
+ stdout.write ("Current display name is `%s'\n")
+ display_name = raw_input \
+ ("Enter a new display name or press enter to keep the existing name:\n") \
+ or display_name
+ (display_name, n) = preserve_linebreak (display_name, display_linebroken)
+ else:
+ display_name = original_display_name
+ return ('@%snamed{%s,%s}' % (type, ref, display_name)) + \
+ next_char + n
+ else:
+ return ('@%s{%s}' % (type, ref)) + next_char + n
+
+
+log.write ("Checking cross-references...\n")
+
+# Rewrite each manual file in place: every x-ref match is passed through
+# check_ref, and the file is only rewritten when something changed.
+try:
+    for key in manuals:
+        for file in manuals[key]['contents']:
+            s = ref_re.sub (lambda m: check_ref (key, file, m),
+                            manuals[key]['contents'][file])
+            if s != manuals[key]['contents'][file]:
+                open (file, 'w').write (s)
+except KeyboardInterrupt:
+    log.write ("Operation interrupted, exiting.\n")
+    sys.exit (2)
+except InteractionError, instance:
+    log.write ("Operation refused by user: %s\nExiting.\n" % instance)
+    sys.exit (3)
+
+# NOTE(review): '\e' is not a Python escape; this prints a literal
+# backslash-e rather than an ANSI color sequence.
+log.write ("\e[1;36mDone: %d x-refs found, %d bad x-refs found, fixed %d.\e[0m\n" %
+           (refs_count, bad_refs_count, fixes_count))
--- /dev/null
+#!/usr/bin/env python
+
+import __main__
+import optparse
+import os
+import sys
+
+import langdefs
+import buildlib
+
+verbose = 0
+use_colors = False
+lang = 'C'
+C = lang
+
+def dir_lang (file, lang, lang_dir_index):
+ path_components = file.split ('/')
+ path_components[lang_dir_index] = lang
+ return os.path.join (*path_components)
+
+def do_file (file_name, lang_codes, buildlib):
+    """Check (or, with --update, interactively update) one translated
+file against its original, using buildlib.check_translated_doc.
+
+Relies on module globals verbose/use_colors/update_mode/text_editor set
+by do_options()/main()."""
+    if verbose:
+        sys.stderr.write ('%s...\n' % file_name)
+    # The language code is the first or second path component,
+    # e.g. fr/user/foo.itely or Documentation/fr/user/foo.itely.
+    split_file_name = file_name.split ('/')
+    d1, d2 = split_file_name[0:2]
+    if d1 in lang_codes:
+        check_lang = d1
+        lang_dir_index = 0
+    elif d2 in lang_codes:
+        check_lang = d2
+        lang_dir_index = 1
+    else:
+        check_lang = lang
+    if check_lang == C:
+        raise Exception ('cannot determine language for ' + file_name)
+
+    # Path of the untranslated original: language component emptied.
+    original = dir_lang (file_name, '', lang_dir_index)
+    translated_contents = open (file_name).read ()
+    (diff_string, error) \
+        = buildlib.check_translated_doc (original,
+                                         file_name,
+                                         translated_contents,
+                                         color=use_colors and not update_mode)
+
+    if error:
+        sys.stderr.write ('warning: %s: %s' % (file_name, error))
+
+    if update_mode:
+        # Diff bigger than the original (or a failed diff): just open
+        # both files in the editor; otherwise edit against a temp .diff.
+        if error or len (diff_string) >= os.path.getsize (original):
+            buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + original)
+        elif diff_string:
+            diff_file = original + '.diff'
+            f = open (diff_file, 'w')
+            f.write (diff_string)
+            f.close ()
+            buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + diff_file)
+            os.remove (diff_file)
+    else:
+        sys.stdout.write (diff_string)
+
+def usage ():
+    # Print usage help to stdout.
+    sys.stdout.write (r'''
+Usage:
+check-translation [--language=LANG] [--verbose] [--update] FILE...
+
+This script is licensed under the GNU GPL.
+''')
+
+def do_options ():
+ global lang, verbose, update_mode, use_colors
+
+ p = optparse.OptionParser (usage="check-translation [--language=LANG] [--verbose] FILE...",
+ description="This script is licensed under the GNU GPL.")
+ p.add_option ("--language",
+ action='store',
+ default='site',
+ dest="language")
+ p.add_option ("--no-color",
+ action='store_false',
+ default=True,
+ dest="color",
+ help="do not print ANSI-cooured output")
+ p.add_option ("--verbose",
+ action='store_true',
+ default=False,
+ dest="verbose",
+ help="print details, including executed shell commands")
+ p.add_option ('-u', "--update",
+ action='store_true',
+ default=False,
+ dest='update_mode',
+ help='call $EDITOR to update the translation')
+
+ (options, files) = p.parse_args ()
+ verbose = options.verbose
+ lang = options.language
+ use_colors = options.color
+ update_mode = options.update_mode
+
+ return files
+
+def main ():
+    """Entry point: parse options and check every file argument."""
+    global update_mode, text_editor
+
+    files = do_options ()
+    # --update needs $EDITOR; without one, fall back to check-only mode.
+    if 'EDITOR' in os.environ:
+        text_editor = os.environ['EDITOR']
+    else:
+        update_mode = False
+
+    buildlib.verbose = verbose
+
+    for i in files:
+        do_file (i, langdefs.LANGDICT.keys (), buildlib)
+
+if __name__ == '__main__':
+    main ()
--- /dev/null
+#!/usr/bin/env python
+
+import os
+import glob
+import re
+import sys
+import optparse
+
+#File 'accidental-engraver.cc'
+#Lines executed:87.70% of 252
+
+def summary (args):
+    """Parse gcov summary files (see the sample header above) and print
+files sorted by untested-line count, worst first."""
+    results = []
+    for f in args:
+        # NOTE(review): `str' shadows the builtin; harmless here.
+        str = open (f).read ()
+        m = re.search ("File '([^']+.cc)'\s*Lines executed:([0-9.]+)% of ([0-9]+)", str)
+
+        # Skip coverage rows for system headers.
+        if m and '/usr/lib' in m.group (1):
+            continue
+
+        if m:
+            cov = float (m.group (2))
+            lines = int (m.group (3))
+            # "pain" = untested percentage weighted by file size.
+            pain = lines * (100.0 - cov)
+            file = m.group (1)
+            # locals() snapshot gives the dict used in the %-format below.
+            tup = (pain, locals ().copy())
+
+            results.append(tup)
+
+    results.sort ()
+    results.reverse()
+
+    print 'files sorted by number of untested lines (decreasing)'
+    print
+    print '%5s (%6s): %s' % ('cov %', 'lines', 'file')
+    print '----------------------------------------------'
+
+    for (pain, d) in results:
+        print '%(cov)5.2f (%(lines)6d): %(file)s' % d
+
+class Chunk:
+ def __init__ (self, range, coverage_count, all_lines, file):
+ assert coverage_count >= 0
+ assert type (range) == type (())
+
+ self.coverage_count = coverage_count
+ self.range = range
+ self.all_lines = all_lines
+ self.file = file
+
+ def length (self):
+ return self.range[1] - self.range[0]
+
+ def text (self):
+ return ''.join ([l[2] for l in self.lines()])
+
+ def lines (self):
+ return self.all_lines[self.range[0]:
+ self.range[1]]
+ def widen (self):
+ self.range = (min (self.range[0] -1, 0),
+ self.range[0] +1)
+ def write (self):
+ print 'chunk in', self.file
+ for (c, n, l) in self.lines ():
+ cov = '%d' % c
+ if c == 0:
+ cov = '#######'
+ elif c < 0:
+ cov = ''
+ sys.stdout.write ('%8s:%8d:%s' % (cov, n, l))
+
+ def uncovered_score (self):
+ return self.length ()
+
+class SchemeChunk (Chunk):
+    """Chunk specialization for Scheme sources: data-only top-level
+    forms are not expected to "execute", so they score zero."""
+    def uncovered_score (self):
+        text = self.text ()
+        # Plain (define name ...) -- a value, not a procedure.
+        if (text.startswith ('(define ')
+            and not text.startswith ('(define (')):
+            return 0
+
+        if text.startswith ('(use-modules '):
+            return 0
+
+        if (text.startswith ('(define-public ')
+            and not text.startswith ('(define-public (')):
+            return 0
+
+        # Count only lines with an exact zero execution count.
+        return len ([l for (c,n,l) in self.lines() if (c == 0)])
+
+def read_gcov (f):
+    """Parse a gcov annotated-source file F into a list of
+(count, line_number, text) tuples.
+
+count is 0 for unexecuted lines ('#####' marker), -1 for non-code
+lines ('-' marker), otherwise the execution count."""
+    ls = []
+
+    in_lines = [l for l in open (f).readlines ()]
+    # Column widths of the count and line-number fields, measured from
+    # the first line (gcov output is fixed-width).
+    (count_len, line_num_len) = tuple (map (len, in_lines[0].split (':')[:2]))
+
+    for l in in_lines:
+        c = l[:count_len].strip ()
+        l = l[count_len+1:]
+        n = int (l[:line_num_len].strip ())
+
+        # Line number 0 lines are gcov metadata, not source.
+        if n == 0:
+            continue
+
+        if '#' in c:
+            c = 0
+        elif c == '-':
+            c = -1
+        else:
+            c = int (c)
+
+        l = l[line_num_len+1:]
+
+        ls.append ((c,n,l))
+
+    return ls
+
+def get_c_chunks (ls, file):
+    """Group the (count, line_number, text) tuples LS of a C/C++ file
+into Chunk objects, one per run of lines with the same count.
+
+Non-code lines (count < 0) are absorbed into the current chunk, except
+a lone closing brace which terminates it."""
+    chunks = []
+    chunk = []
+
+    last_c = -1
+    for (c, n, l) in ls:
+        # Start a new chunk whenever the count changes (non-code lines
+        # other than '}' do not break the run).
+        if not (c == last_c or c < 0 and l != '}\n'):
+            if chunk and last_c >= 0:
+                nums = [n-1 for (n, l) in chunk]
+                chunks.append (Chunk ((min (nums), max (nums)+1),
+                                      last_c, ls, file))
+            chunk = []
+
+        chunk.append ((n,l))
+        if c >= 0:
+            last_c = c
+
+    return chunks
+
+def get_scm_chunks (ls, file):
+    """Group the (count, line_number, text) tuples LS of a Scheme file
+into SchemeChunk objects, one per top-level form (a line starting with
+an opening parenthesis begins a new chunk)."""
+    chunks = []
+    chunk = []
+
+    def new_chunk ():
+        # Flush the accumulated lines into a SchemeChunk.
+        if chunk:
+            nums = [n-1 for (n, l) in chunk]
+            chunks.append (SchemeChunk ((min (nums), max (nums)+1),
+                                        max (last_c, 0), ls, file))
+            chunk[:] = []
+
+    last_c = -1
+    for (cov_count, line_number, line) in ls:
+        if line.startswith ('('):
+            new_chunk ()
+            last_c = -1
+
+        chunk.append ((line_number, line))
+        if cov_count >= 0:
+            last_c = cov_count
+
+    return chunks
+
+def widen_chunk (ch, ls):
+ a -= 1
+ b += 1
+
+ return [(n, l) for (c, n, l) in ls[a:b]]
+
+
+def extract_chunks (file):
+    """Read gcov output FILE and return its chunks, using the Scheme
+grouping for .scm coverage files and the C grouping otherwise.
+Unreadable files yield an empty list."""
+    try:
+        ls = read_gcov (file)
+    except IOError, s :
+        print s
+        return []
+
+    cs = []
+    if 'scm' in file:
+        cs = get_scm_chunks (ls, file)
+    else:
+        cs = get_c_chunks (ls, file)
+    return cs
+
+
+def filter_uncovered (chunks):
+ def interesting (c):
+ if c.coverage_count > 0:
+ return False
+
+ t = c.text()
+ for stat in ('warning', 'error', 'print', 'scm_gc_mark'):
+ if stat in t:
+ return False
+ return True
+
+ return [c for c in chunks if interesting (c)]
+
+
+def main ():
+    """Entry point: dispatch on --summary / --uncovered / --hotspots
+over the file arguments (which name sources; the matching .gcov/.cov
+files are derived here)."""
+    p = optparse.OptionParser (usage="usage coverage.py [options] files",
+                               description="")
+    p.add_option ("--summary",
+                  action='store_true',
+                  default=False,
+                  dest="summary")
+
+    p.add_option ("--hotspots",
+                  default=False,
+                  action='store_true',
+                  dest="hotspots")
+
+    p.add_option ("--uncovered",
+                  default=False,
+                  action='store_true',
+                  dest="uncovered")
+
+
+    (options, args) = p.parse_args ()
+
+
+    if options.summary:
+        summary (['%s.gcov-summary' % s for s in args])
+
+    if options.uncovered or options.hotspots:
+        chunks = []
+        for a in args:
+            # Scheme coverage files end in .scm.cov, C/C++ in .gcov.
+            name = a
+            if name.endswith ('scm'):
+                name += '.cov'
+            else:
+                name += '.gcov'
+
+            chunks += extract_chunks (name)
+
+        if options.uncovered:
+            chunks = filter_uncovered (chunks)
+            chunks = [(c.uncovered_score (), c) for c in chunks if c.uncovered_score() > 0]
+        elif options.hotspots:
+            # Sort key: highest count first, longest chunk first.
+            chunks = [((c.coverage_count, -c.length()), c) for c in chunks]
+
+
+        chunks.sort ()
+        chunks.reverse ()
+        for (score, c) in chunks:
+            c.write ()
+
+
+
+if __name__ == '__main__':
+    main ()
--- /dev/null
+#!/usr/bin/env python
+import sys
+import re
+import os
+
+
+# basename -> full path of every parsed file.
+full_paths = {}
+# basename -> list of (line number, included header name).
+incs = {}
+inc_re = re.compile ('^#include "([^"]+)"')
+def parse_file (fn):
+    """Record all local `#include "..."' directives of FN (keyed by its
+basename) in the module-level incs/full_paths tables."""
+    lst = []
+
+    lc = 0
+    for l in open (fn).readlines():
+        lc += 1
+        m = inc_re.search (l)
+        if m:
+            lst.append ((lc, m.group (1)))
+
+    base = os.path.split (fn)[1]
+    full_paths[base] = fn
+    incs[base] = lst
+
+
+def has_include (f, name):
+ try:
+ return name in [b for (a,b) in incs[f]]
+ except KeyError:
+ return False
+
+for a in sys.argv:
+ parse_file (a)
+
+print '-*-compilation-*-'
+for (f, lst) in incs.items ():
+ for (n, inc) in lst:
+ for (n2, inc2) in lst:
+ if has_include (inc2, inc):
+ print "%s:%d: already have %s from %s" % (full_paths[f], n,
+ inc, inc2)
+ break
+
+
+
--- /dev/null
+#!/usr/bin/env python
+
+# fixcc -- nitpick lily's c++ code
+
+# TODO
+# * maintainable rules: regexp's using whitespace (?x) and match names
+# <identifier>)
+# * trailing `*' vs. function definition
+# * do not break/change indentation of fixcc-clean files
+# * check lexer, parser
+# * rewrite in elisp, add to cc-mode
+# * using regexes is broken by design
+# * ?
+# * profit
+
+import __main__
+import getopt
+import os
+import re
+import string
+import sys
+import time
+
+COMMENT = 'COMMENT'
+STRING = 'STRING'
+GLOBAL_CXX = 'GC++'
+CXX = 'C++'
+verbose_p = 0
+indent_p = 0
+
+rules = {
+ GLOBAL_CXX:
+ [
+ # delete gratuitous block
+ ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
+ '\n\\2;'),
+ ],
+ CXX:
+ [
+ # space before parenthesis open
+ ('([^\( \]])[ \t]*\(', '\\1 ('),
+ # space after comma
+ ("\([^'],\)[ \t]*", '\1 '),
+ # delete gratuitous block
+ ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
+ '\n\\2;'),
+ # delete inline tabs
+ ('(\w)\t+', '\\1 '),
+ # delete inline double spaces
+ (' *', ' '),
+ # delete space after parenthesis open
+ ('\([ \t]*', '('),
+ # delete space before parenthesis close
+ ('[ \t]*\)', ')'),
+ # delete spaces after prefix
+ ('(--|\+\+)[ \t]*([\w\)])', '\\1\\2'),
+ # delete spaces before postfix
+ ('([\w\)\]])[ \t]*(--|\+\+)', '\\1\\2'),
+ # delete space after parenthesis close
+ #('\)[ \t]*([^\w])', ')\\1'),
+ # delete space around operator
+ # ('([\w\(\)\]])([ \t]*)(::|\.)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
+ ('([\w\(\)\]])([ \t]*)(\.|->)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
+ # delete space after operator
+ ('(::)([ \t]*)([\w\(\)])', '\\1\\3'),
+ # delete superflous space around operator
+ ('([\w\(\)\]])([ \t]+)(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&|\||\*)([ \t]+)([\w\(\)])', '\\1 \\3 \\5'),
+ # space around operator1
+ ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
+ # space around operator2
+ ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) ([^\w\s])', '\\1 \\2 \\3'),
+ # space around operator3
+ ('([^\w\s]) (&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|[^-]>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
+ # space around operator4
+ ('([\w\(\)\]]) (\*|/|\+|-) *([-:])', '\\1 \\2 \\3'),
+ # space around +/-; exponent
+ ('([\w\)\]])(\+|-)([_A-Za-z\(])', '\\1 \\2 \\3'),
+ ('([_\dA-Za-df-z\)\]])(\+|-)([\w\(])', '\\1 \\2 \\3'),
+ # trailing operator
+ (' (::|&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&XXX|\||\*XXX)[ \t]*\n([ \t]*)', '\n\\2\\1 '),
+ # pointer
+ ##('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
+ ('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|:|=|\?\)|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
+ #to#('(bool|char|const|delete|int|stream|unsigned|void|([A-Z]\w*)|[,])[ \n\t]*(\*|&)[ \t]*', '\\1 \\3'),
+ # pointer with template
+ ('(( *((bool|char|const|delete|int|stream|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*|\w+::\w+|[,])[ \*&],*)+)>) *(\*|&) *', '\\1 \\5'),
+ #to#('(( *((bool|char|delete|int|stream|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)|[,])[ \*&],*)+)>)[ \t\n]*(\*|&) *', '\\1 \\7'),
+ # unary pointer, minus, not
+ ('(return|=) (\*|&|-|!) ([\w\(])', '\\1 \\2\\3'),
+ # space after `operator'
+ ('(\Woperator) *([^\w\s])', '\\1 \\2'),
+ # dangling brace close
+ ('\n[ \t]*(\n[ \t]*})', '\\1'),
+ # dangling newline
+ ('\n[ \t]*\n[ \t]*\n', '\n\n'),
+ # dangling parenthesis open
+ #('[ \t]*\n[ \t]*\([ \t]*\n', '('),
+ ('\([ \t]*\n', '('),
+ # dangling parenthesis close
+ ('\n[ \t]*\)', ')'),
+ # dangling comma
+ ('\n[ \t]*,', ','),
+ # dangling semicolon
+ ('\n[ \t]*;', ';'),
+ # brace open
+ ('(\w)[ \t]*([^\s]*){([ \t]*\n)', '\\1\\2\n{\n'),
+ # brace open backslash
+ ('(\w[^\n]*){[ \t]*\\\\\n', '\\1\\\n{\\\n'),
+ # brace close
+ ("}[ \t]*([^'\n]*\w[^\n\\\]*)\n", '}\n\\1\n'),
+ # brace close backslash
+ ("}[ \t]*([^'\n]*\w[^\n\\\]*)", '\n}\n\\1'),
+ # delete space after `operator'
+ #('(\Woperator) (\W)', '\\1\\2'),
+ # delete space after case, label
+ ('(\W(case|label) ([\w]+)) :', '\\1:'),
+ # delete space before comma
+ ('[ \t]*,', ','),
+ # delete space before semicolon
+ ('[ \t]*;', ';'),
+ # delete space before eol-backslash
+ ('[ \t]*\\\\\n', '\\\n'),
+ # delete trailing whitespace
+ ('[ \t]*\n', '\n'),
+
+ ## Deuglify code that also gets ugly by rules above.
+ # delete newline after typedef struct
+ ('(typedef struct\s+([\w]*\s){([^}]|{[^}]*})*})\s*\n\s*(\w[\w\d]*;)', '\\1 \\4'),
+ # delete spaces around template brackets
+ #('(dynamic_cast|template|([A-Z]\w*))[ \t]*<[ \t]*(( *(bool|char|int|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)),?)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\3\\8>'),
+ ('(dynamic_cast|template|typedef|\w+::\w+|[A-Z]\w*)[ \t]*<[ \t]*(( *(bool|char|const|int|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*)( *[\*&]?,|[\*&])*)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\2\\6>'),
+ ('(\w+::\w+|[A-Z]\w*) < ((\w+::\w+|[A-Z]\w*)<[A-Z]\w*>) >', '\\1<\\2 >'),
+ ('((if|while)\s+\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\n;'),
+ ('(for\s+\(([^;]*;[^;]*;([^\)]|\([^\)]*\))*)\))\s*;', '\\1\n;'),
+ # do {..} while
+ ('(}\s*while\s*)(\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\\2;'),
+
+ ## Fix code that gets broken by rules above.
+ ##('->\s+\*', '->*'),
+ # delete space before #define x()
+ ('#[ \t]*define (\w*)[ \t]*\(', '#define \\1('),
+ # add space in #define x ()
+ ('#[ \t]*define (\w*)(\(([^\(\)]|\([^\(\)]*\))*\)\\n)',
+ '#define \\1 \\2'),
+ # delete space in #include <>
+ ('#[ \t]*include[ \t]*<[ \t]*([^ \t>]*)[ \t]*(/?)[ \t]*([^ \t>]*)[ \t]*>',
+ '#include <\\1\\2\\3>'),
+ # delete backslash before empty line (emacs' indent region is broken)
+ ('\\\\\n\n', '\n\n'),
+ ],
+
+ COMMENT:
+ [
+ # delete trailing whitespace
+ ('[ \t]*\n', '\n'),
+ # delete empty first lines
+ ('(/\*\n)\n*', '\\1'),
+ # delete empty last lines
+ ('\n*(\n\*/)', '\\1'),
+ ## delete newline after start?
+ #('/(\*)\n', '\\1'),
+ ## delete newline before end?
+ #('\n(\*/)', '\\1'),
+ ],
+ }
+
+# Recognize special sequences in the input.
+#
+# (?P<name>regex) -- Assign result of REGEX to NAME.
+# *? -- Match non-greedily.
+# (?m) -- Multiline regex: Make ^ and $ match at each line.
+# (?s) -- Make the dot match all characters including newline.
+# (?x) -- Ignore whitespace in patterns.
+# A pattern that can never match (backspace between two `a's).
+no_match = 'a\ba'
+# Per-language regexes locating regions that must not be reformatted by
+# the CXX rules (comments, strings, chars, includes).  Each entry binds
+# the groups `match' and `code'.
+snippet_res = {
+    CXX: {
+        'multiline_comment':
+            r'''(?sx)
+            (?P<match>
+            (?P<code>
+            [ \t]*/\*.*?\*/))''',
+
+        'singleline_comment':
+            r'''(?mx)
+            ^.*
+            (?P<match>
+            (?P<code>
+            [ \t]*//([ \t][^\n]*|)\n))''',
+
+        'string':
+            r'''(?x)
+            (?P<match>
+            (?P<code>
+            "([^\"\n](\")*)*"))''',
+
+        'char':
+            r'''(?x)
+            (?P<match>
+            (?P<code>
+            '([^']+|\')))''',
+
+        'include':
+            # NOTE(review): unlike the entries above, this pattern does
+            # not close its (?P<match>/(?P<code> groups -- re.compile
+            # would reject it if 'include' were ever requested; confirm
+            # against the callers of find_toplevel_snippets.
+            r'''(?x)
+            (?P<match>
+            (?P<code>
+            "#[ \t]*include[ \t]*<[^>]*>''',
+        },
+    }
+
+class Chunk:
+    # Abstract base for one piece of the input file; subclasses decide how
+    # the piece is rewritten when the file is reassembled.
+    def replacement_text (self):
+        # Text this chunk contributes to the output; base yields nothing.
+        return ''
+
+    def filter_text (self):
+        # Hook for subclasses; by default identical to replacement_text.
+        return self.replacement_text ()
+
+class Substring (Chunk):
+    # A plain stretch of C++ code (everything between recognized snippets);
+    # the CXX rewrite rules are applied to it on output.
+    def __init__ (self, source, start, end):
+        # Keep the whole file text plus our [start:end) slice bounds.
+        self.source = source
+        self.start = start
+        self.end = end
+
+    def replacement_text (self):
+        # Apply every (pattern, replacement) pair in rules[CXX] to the slice.
+        s = self.source[self.start:self.end]
+        if verbose_p:
+            sys.stderr.write ('CXX Rules')
+        for i in rules[CXX]:
+            if verbose_p:
+                sys.stderr.write ('.')
+            #sys.stderr.write ('\n\n***********\n')
+            #sys.stderr.write (i[0])
+            #sys.stderr.write ('\n***********\n')
+            #sys.stderr.write ('\n=========>>\n')
+            #sys.stderr.write (s)
+            #sys.stderr.write ('\n<<=========\n')
+            s = re.sub (i[0], i[1], s)
+        if verbose_p:
+            sys.stderr.write ('done\n')
+        return s
+
+
+class Snippet (Chunk):
+    # A recognized lexical construct (comment, string, ...); by default it
+    # is copied through to the output unmodified.
+    def __init__ (self, type, match, format):
+        self.type = type
+        self.match = match
+        self.hash = 0
+        self.options = []
+        self.format = format
+
+    def replacement_text (self):
+        # Emit the matched text verbatim.
+        return self.match.group ('match')
+
+    def substring (self, s):
+        # Named-group accessor on the underlying regex match.
+        return self.match.group (s)
+
+    def __repr__ (self):
+        # NOTE(review): backquote repr is Python 2-only syntax.
+        return `self.__class__` + ' type = ' + self.type
+
+class Multiline_comment (Snippet):
+    # A /* ... */ comment; the COMMENT rules are applied to its text.
+    def __init__ (self, source, match, format):
+        # NOTE(review): 'self.type = type' binds the *builtin* type, not the
+        # 'source' parameter this is called with -- looks like a latent bug;
+        # confirm against Snippet.__init__ before relying on self.type here.
+        self.type = type
+        self.match = match
+        self.hash = 0
+        self.options = []
+        self.format = format
+
+    def replacement_text (self):
+        # Apply every (pattern, replacement) pair in rules[COMMENT].
+        s = self.match.group ('match')
+        if verbose_p:
+            sys.stderr.write ('COMMENT Rules')
+        for i in rules[COMMENT]:
+            if verbose_p:
+                sys.stderr.write ('.')
+            s = re.sub (i[0], i[1], s)
+        return s
+
+# Snippet types with a specialized class; all other types fall back to the
+# pass-through Snippet class in find_toplevel_snippets.
+snippet_type_to_class = {
+    'multiline_comment': Multiline_comment,
+#    'string': Multiline_comment,
+#    'include': Include_snippet,
+}
+
+def find_toplevel_snippets (s, types):
+    # Split S into an alternating list of Substring (plain code) and
+    # Snippet (comment/string/...) chunks, scanning left to right with one
+    # compiled regex per requested snippet type.
+    if verbose_p:
+        sys.stderr.write ('Dissecting')
+
+    res = {}
+    for i in types:
+        res[i] = re.compile (snippet_res[format][i])
+
+    snippets = []
+    index = 0
+    ## found = dict (map (lambda x: (x, None),
+    ##                    types))
+    ## urg python2.1
+    found = {}
+    map (lambda x, f = found: f.setdefault (x, None),
+         types)
+
+    # We want to search for multiple regexes, without searching
+    # the string multiple times for one regex.
+    # Hence, we use earlier results to limit the string portion
+    # where we search.
+    # Since every part of the string is traversed at most once for
+    # every type of snippet, this is linear.
+
+    while 1:
+        if verbose_p:
+            sys.stderr.write ('.')
+        first = None
+        endex = 1 << 30
+        for type in types:
+            # Cached match still ahead of INDEX?  Reuse it; else rescan.
+            if not found[type] or found[type][0] < index:
+                found[type] = None
+                m = res[type].search (s[index:endex])
+                if not m:
+                    continue
+
+                cl = Snippet
+                if snippet_type_to_class.has_key (type):
+                    cl = snippet_type_to_class[type]
+                snip = cl (type, m, format)
+                start = index + m.start ('match')
+                found[type] = (start, snip)
+
+            # Track the earliest match over all types.
+            if found[type] \
+               and (not first \
+                    or found[type][0] < found[first][0]):
+                first = type
+
+                # FIXME.
+
+                # Limiting the search space is a cute
+                # idea, but this *requires* to search
+                # for possible containing blocks
+                # first, at least as long as we do not
+                # search for the start of blocks, but
+                # always/directly for the entire
+                # @block ... @end block.
+
+                endex = found[first][0]
+
+        if not first:
+            # No snippet left: the remainder of S is plain code.
+            snippets.append (Substring (s, index, len (s)))
+            break
+
+        (start, snip) = found[first]
+        snippets.append (Substring (s, index, start))
+        snippets.append (snip)
+        found[first] = None
+        index = start + len (snip.match.group ('match'))
+
+    return snippets
+
+def nitpick_file (outdir, file):
+    # Apply the global rules, then the per-chunk rules, to FILE.  The
+    # result is written back in place (keeping a ~ backup) or into OUTDIR.
+    s = open (file).read ()
+
+    for i in rules[GLOBAL_CXX]:
+        s = re.sub (i[0], i[1], s)
+
+    # FIXME: Containing blocks must be first, see
+    #        find_toplevel_snippets.
+    # We leave simple strings be part of the code
+    # ('string' has no entry in snippet_type_to_class, so strings become
+    # plain Snippets and pass through untouched).
+    snippet_types = (
+        'multiline_comment',
+        'singleline_comment',
+        'string',
+#        'char',
+        )
+
+    chunks = find_toplevel_snippets (s, snippet_types)
+    #code = filter (lambda x: is_derived_class (x.__class__, Substring),
+    #               chunks)
+
+    t = string.join (map (lambda x: x.filter_text (), chunks), '')
+    fixt = file
+    if s != t:
+        if not outdir:
+            os.system ('mv %s %s~' % (file, file))
+        else:
+            fixt = os.path.join (outdir,
+                                 os.path.basename (file))
+        h = open (fixt, "w")
+        h.write (t)
+        h.close ()
+    # Reindent whenever something changed, or always with --indent.
+    if s != t or indent_p:
+        indent_file (fixt)
+
+def indent_file (file):
+    # Re-indent FILE in place by batch-running Emacs' C++ mode over it.
+    emacs = '''emacs\
+    --no-window-system\
+    --batch\
+    --no-site-file\
+    --no-init-file\
+    %(file)s\
+    --eval '(let ((error nil)
+     (version-control nil))
+    (load-library "cc-mode")
+    (c++-mode)
+    (indent-region (point-min) (point-max))
+    (if (buffer-modified-p (current-buffer))
+     (save-buffer)))' ''' % vars ()
+    # NOTE(review): the emacsclient variant below is built but never
+    # executed; only the batch 'emacs' command above is run.
+    emacsclient = '''emacsclient\
+    --socket-name=%(socketdir)s/%(socketname)s\
+    --no-wait\
+    --eval '(let ((error nil)
+     (version-control nil))
+    (load-library "cc-mode")
+    (find-file "%(file)s")
+    (c++-mode)
+    (indent-region (point-min) (point-max))
+    (if (buffer-modified-p (current-buffer))
+     (save-buffer)))' ''' \
+    % { 'file': file,
+        'socketdir' : socketdir,
+        'socketname' : socketname, }
+    if verbose_p:
+        sys.stderr.write (emacs)
+        sys.stderr.write ('\n')
+    os.system (emacs)
+
+
+def usage ():
+    # Print command-line help to stdout.
+    sys.stdout.write (r'''
+Usage:
+fixcc [OPTION]... FILE...
+
+Options:
+ --help
+ --indent reindent, even if no changes
+ --verbose
+ --test
+
+Typical use with LilyPond:
+
+ fixcc $(find flower kpath-guile lily -name '*cc' -o -name '*hh' | grep -v /out)
+
+This script is licensed under the GNU GPL
+''')
+
+def do_options ():
+    # Parse command-line flags into the module globals; return the list of
+    # files to process.  Exits for --help/--test or when no files given.
+    global indent_p, outdir, verbose_p
+    (options, files) = getopt.getopt (sys.argv[1:], '',
+                                      ['help', 'indent', 'outdir=',
+                                       'test', 'verbose'])
+    for (o, a) in options:
+        if o == '--help':
+            usage ()
+            sys.exit (0)
+        elif o == '--indent':
+            indent_p = 1
+        elif o == '--outdir':
+            outdir = a
+        elif o == '--verbose':
+            verbose_p = 1
+        elif o == '--test':
+            test ()
+            sys.exit (0)
+        else:
+            # Unreachable: getopt rejects unknown options first.  Would
+            # raise NameError ('unimplemented' is undefined) if ever hit.
+            assert unimplemented
+    if not files:
+        usage ()
+        sys.exit (2)
+    return files
+
+
+# Module configuration: outdir 0 means "fix files in place (keep ~ backup)".
+outdir = 0
+format = CXX
+socketdir = '/tmp/fixcc'
+# Per-process socket name for the (currently unused) emacsclient path.
+socketname = 'fixcc%d' % os.getpid ()
+
+def setup_client ():
+    # Fork a background Emacs server for the (currently disabled)
+    # emacsclient-based indenting, and wait for its socket to appear.
+    #--no-window-system\
+    #--batch\
+    # NOTE(review): unlink raises if the socket does not exist yet, and
+    # mkdir fails once the directory exists -- ordering looks fragile;
+    # confirm before re-enabling this path (see main()).
+    os.unlink (os.path.join (socketdir, socketname))
+    os.mkdir (socketdir, 0700)
+    emacs='''emacs\
+    --no-site-file\
+    --no-init-file\
+    --eval '(let ((error nil)
+     (version-control nil))
+    (load-library "server")
+    (setq server-socket-dir "%(socketdir)s")
+    (setq server-name "%(socketname)s")
+    (server-start)
+    (while t) (sleep 1000))' ''' \
+    % { 'socketdir' : socketdir,
+        'socketname' : socketname, }
+
+    if not os.fork ():
+        # Child: run the Emacs server until killed.
+        os.system (emacs)
+        sys.exit (0)
+    # Parent: block until the server socket shows up.
+    while not os.path.exists (os.path.join (socketdir, socketname)):
+        time.sleep (1)
+
+def main ():
+    # Entry point: parse options, create the output directory if needed,
+    # then nitpick every file named on the command line.
+    #emacsclient should be faster, but this does not work yet
+    #setup_client ()
+    files = do_options ()
+    if outdir and not os.path.isdir (outdir):
+        os.makedirs (outdir)
+    for i in files:
+        sys.stderr.write ('%s...\n' % i)
+        nitpick_file (outdir, i)
+
+
+## TODO: make this compilable and check with g++
+TEST = '''
+#include <libio.h>
+#include <map>
+class
+ostream ;
+
+class Foo {
+public: static char* foo ();
+std::map<char*,int>* bar (char, char) { return 0; }
+};
+typedef struct
+{
+ Foo **bar;
+} String;
+
+ostream &
+operator << (ostream & os, String d);
+
+typedef struct _t_ligature
+{
+ char *succ, *lig;
+ struct _t_ligature * next;
+} AFM_Ligature;
+
+typedef std::map < AFM_Ligature const *, int > Bar;
+
+ /**
+ (c) 1997--2008 Han-Wen Nienhuys <hanwen@cs.uu.nl>
+ */
+
+/* ||
+* vv
+* !OK OK
+*/
+/* ||
+ vv
+ !OK OK
+*/
+char *
+Foo:: foo ()
+{
+int
+i
+;
+ char* a= &++ i ;
+ a [*++ a] = (char*) foe (*i, &bar) *
+ 2;
+ int operator double ();
+ std::map<char*,int> y =*bar(-*a ,*b);
+ Interval_t<T> & operator*= (T r);
+ Foo<T>*c;
+ int compare (Pqueue_ent < K, T > const& e1, Pqueue_ent < K,T> *e2);
+ delete *p;
+ if (abs (f)*2 > abs (d) *FUDGE)
+ ;
+ while (0);
+ for (; i<x foo(); foo>bar);
+ for (; *p && > y;
+ foo > bar)
+;
+ do {
+ ;;;
+ }
+ while (foe);
+
+ squiggle. extent;
+ 1 && * unsmob_moment (lf);
+ line_spanner_ = make_spanner ("DynamicLineSpanner", rq ? rq->*self_scm
+(): SCM_EOL);
+ case foo: k;
+
+ if (0) {a=b;} else {
+ c=d;
+ }
+
+ cookie_io_functions_t Memory_out_stream::functions_ = {
+ Memory_out_stream::reader,
+ ...
+ };
+
+ int compare (Array < Pitch> *, Array < Pitch> *);
+ original_ = (Grob *) & s;
+ Drul_array< Link_array<Grob> > o;
+}
+
+ header_.char_info_pos = (6 + header_length) * 4;
+ return ly_bool2scm (*ma < * mb);
+
+ 1 *::sign(2);
+
+ (shift) *-d;
+
+ a = 0 ? *x : *y;
+
+a = "foo() 2,2,4";
+{
+ if (!span_)
+ {
+ span_ = make_spanner ("StaffSymbol", SCM_EOL);
+ }
+}
+{
+ if (!span_)
+ {
+ span_ = make_spanner (StaffSymbol, SCM_EOL);
+ }
+}
+'''
+
+def test ():
+    # Self-test: write the built-in TEST source to fixcc.cc, run the fixer
+    # over it, and dump the result to stdout for eyeballing.
+    test_file = 'fixcc.cc'
+    open (test_file, 'w').write (TEST)
+    nitpick_file (outdir, test_file)
+    sys.stdout.write (open (test_file).read ())
+
+if __name__ == '__main__':
+ main ()
+
--- /dev/null
+#!/usr/bin/env python
+
+import sys
+import os
+import glob
+import re
+
+USAGE = ''' Usage: makelsr.py LSR_SNIPPETS_DIR
+This script must be run from top of the source tree;
+it updates snippets input/lsr with snippets in input/new or LSR_SNIPPETS_DIR.
+'''
+
+LY_HEADER_LSR = '''%% Do not edit this file; it is auto-generated from LSR http://lsr.dsi.unimi.it
+%% This file is in the public domain.
+'''
+
+LY_HEADER_NEW = '''%% Do not edit this file; it is auto-generated from input/new
+%% This file is in the public domain.
+'''
+
+DEST = os.path.join ('input', 'lsr')
+NEW_LYS = os.path.join ('input', 'new')
+TEXIDOCS = os.path.join ('input', 'texidocs')
+
+TAGS = []
+# NR 1
+TAGS.extend (['pitches', 'rhythms', 'expressive-marks',
+'repeats', 'simultaneous-notes', 'staff-notation',
+'editorial-annotations', 'text'])
+# NR 2
+TAGS.extend (['vocal-music', 'chords', 'keyboards',
+'percussion', 'fretted-strings', 'unfretted-strings',
+'ancient-notation', 'winds', 'world-music'
+])
+
+# other
+TAGS.extend (['contexts-and-engravers', 'tweaks-and-overrides',
+'paper-and-layout', 'breaks', 'spacing', 'midi', 'titles', 'template'])
+
+def exit_with_usage (n=0):
+    # Print USAGE to stderr and exit with status n.
+    sys.stderr.write (USAGE)
+    sys.exit (n)
+
+try:
+ in_dir = sys.argv[1]
+except:
+ exit_with_usage (2)
+
+if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)):
+ exit_with_usage (3)
+
+unsafe = []
+unconverted = []
+notags_files = []
+
+# mark the section that will be printed verbatim by lilypond-book
+end_header_re = re.compile ('(\\header {.+?doctitle = ".+?})\n', re.M | re.S)
+
+def mark_verbatim_section (ly_code):
+    # Append '% begin verbatim' right after the \header block (first hit only).
+    return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1)
+
+# '% LSR' comments are to be stripped
+lsr_comment_re = re.compile (r'\s*%+\s*LSR.*')
+
+begin_header_re = re.compile (r'\\header\s*{', re.M)
+
+# add tags to ly files from LSR
+def add_tags (ly_code, tags):
+    # Insert an 'lsrtags = "..."' line just inside the first \header block.
+    return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n', ly_code, 1)
+
+def copy_ly (srcdir, name, tags):
+    # Copy one snippet into input/lsr: splice in translated texidocs and
+    # tags, prepend the auto-generation header, run convert-ly, then
+    # compile in safe mode to flag unsafe snippets (globals unsafe /
+    # unconverted accumulate failures).
+    global unsafe
+    global unconverted
+    dest = os.path.join (DEST, name)
+    tags = ', '.join (tags)
+    s = open (os.path.join (srcdir, name)).read ()
+
+    texidoc_translations_path = os.path.join (TEXIDOCS,
+                                              os.path.splitext (name)[0] + '.texidoc')
+    if os.path.exists (texidoc_translations_path):
+        texidoc_translations = open (texidoc_translations_path).read ()
+        # Since we want to insert the translations verbatim using a
+        # regexp, \\ is understood as ONE escaped backslash. So we have
+        # to escape those backslashes once more...
+        texidoc_translations = texidoc_translations.replace ('\\', '\\\\')
+        s = begin_header_re.sub ('\\g<0>\n' + texidoc_translations, s, 1)
+
+    # LSR-tarball snippets get the LSR header plus injected tags; snippets
+    # from input/new keep their own tags and get the input/new header.
+    if in_dir in srcdir:
+        s = LY_HEADER_LSR + add_tags (s, tags)
+    else:
+        s = LY_HEADER_NEW + s
+
+    s = mark_verbatim_section (s)
+    s = lsr_comment_re.sub ('', s)
+    open (dest, 'w').write (s)
+
+    e = os.system ("convert-ly -e '%s'" % dest)
+    if e:
+        unconverted.append (dest)
+    if os.path.exists (dest + '~'):
+        os.remove (dest + '~')
+    # -V seems to make unsafe snippets fail nicer/sooner
+    e = os.system ("lilypond -V -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" % dest)
+    if e:
+        unsafe.append (dest)
+
+def read_source_with_dirs (src):
+    # Scan SRC/<tag>/*.ly for every known tag.  Returns (s, l) where s maps
+    # basename -> (srcdir, [tags]) and l maps tag -> set of basenames.
+    s = {}
+    l = {}
+    for tag in TAGS:
+        srcdir = os.path.join (src, tag)
+        l[tag] = set (map (os.path.basename, glob.glob (os.path.join (srcdir, '*.ly'))))
+        for f in l[tag]:
+            if f in s:
+                # Same file under several tag dirs: accumulate the tags.
+                s[f][1].append (tag)
+            else:
+                s[f] = (srcdir, [tag])
+    return s, l
+
+
+tags_re = re.compile ('lsrtags\\s*=\\s*"(.+?)"')
+
+def read_source (src):
+    # Scan SRC/*.ly, reading tags from each file's 'lsrtags' header field.
+    # Returns the same (s, l) shape as read_source_with_dirs; files without
+    # an lsrtags line are recorded in the global notags_files list.
+    s = {}
+    l = dict ([(tag, set()) for tag in TAGS])
+    for f in glob.glob (os.path.join (src, '*.ly')):
+        basename = os.path.basename (f)
+        m = tags_re.search (open (f, 'r').read ())
+        if m:
+            file_tags = [tag.strip() for tag in m.group (1). split(',')]
+            s[basename] = (src, file_tags)
+            # Only tags present in TAGS contribute to the per-tag lists.
+            [l[tag].add (basename) for tag in file_tags if tag in TAGS]
+        else:
+            notags_files.append (f)
+    return s, l
+
+
+def dump_file_list (file, list):
+    # Write LIST one entry per line; relies on interpreter exit / refcount
+    # GC to flush and close the handle.
+    f = open (file, 'w')
+    f.write ('\n'.join (list) + '\n')
+
+## clean out existing lys and generated files
+map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
+     glob.glob (os.path.join (DEST, '*.snippet-list')))
+
+# read LSR source where tags are defined by subdirs
+snippets, tag_lists = read_source_with_dirs (in_dir)
+# read input/new where tags are given directly in each file;
+# input/new entries override same-named LSR snippets via dict.update.
+s, l = read_source (NEW_LYS)
+snippets.update (s)
+for t in TAGS:
+    tag_lists[t].update (l[t])
+
+# regenerate every snippet in input/lsr
+for (name, (srcdir, tags)) in snippets.items ():
+    copy_ly (srcdir, name, tags)
+
+# write one <tag>.snippet-list file per tag
+for (tag, file_set) in tag_lists.items ():
+    dump_file_list (os.path.join (DEST, tag + '.snippet-list'), sorted(file_set))
+
+# report failures collected by copy_ly / read_source
+if unconverted:
+    sys.stderr.write ('These files could not be converted successfully by convert-ly:\n')
+    sys.stderr.write ('\n'.join (unconverted) + '\n\n')
+
+if notags_files:
+    sys.stderr.write ('No tags could be found in these files:\n')
+    sys.stderr.write ('\n'.join (notags_files) + '\n\n')
+
+dump_file_list ('lsr-unsafe.txt', unsafe)
+sys.stderr.write ('''
+
+Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY!
+ git add input/lsr/*.ly
+ xargs git-diff HEAD < lsr-unsafe.txt
+
+''')
+
--- /dev/null
+#!/usr/bin/env python
+
+notes = "CDEFGAB"
+alterations = [-1, 0, 1]
+
+def print_note (octave, note, alteration):
+    # Emit one MusicXML quarter-note element for the given step index,
+    # alteration (-1/0/1) and octave.
+    # NOTE(review): '<>' is the Python 2-only inequality operator.
+    print " <note>\n <pitch>\n <step>%s</step>" % notes[note]
+    if alteration <> 0:
+        print " <alter>%s</alter>" % alteration
+    print " <octave>%s</octave>\n </pitch>\n <duration>1</duration>\n <voice>1</voice>\n <type>quarter</type>\n </note>" % octave
+
+
+print """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
+ "http://www.musicxml.org/dtds/partwise.dtd">
+<score-partwise>
+ <movement-title>Various piches and interval sizes</movement-title>
+ <part-list>
+ <score-part id="P1">
+ <part-name>MusicXML Part</part-name>
+ </score-part>
+ </part-list>
+ <!--=========================================================-->
+ <part id="P1">
+ <measure number="1">
+ <attributes>
+ <divisions>1</divisions>
+ <key>
+ <fifths>0</fifths>
+ <mode>major</mode>
+ </key>
+ <time symbol="common">
+ <beats>2</beats>
+ <beat-type>4</beat-type>
+ </time>
+ <clef>
+ <sign>G</sign>
+ <line>2</line>
+ </clef>
+ </attributes>
+"""
+
+start_octave = 5
+
+# For every step/alteration in two octaves, print the note followed by its
+# mirror around the starting C (complementary step, opposite alteration,
+# reflected octave) -- producing matching upward/downward intervals.
+for octave in (start_octave, start_octave+1):
+    for note in (0,1,2,3,4,5,6):
+        for alteration in alterations:
+            if octave == start_octave and note == 0 and alteration == -1:
+                continue
+            print_note (octave, note, alteration)
+#            if octave == start_octave and note == 0 and alteration == 0:
+#                continue
+            # Integer division (1-(7-note)/7) keeps C itself in start_octave.
+            print_note (start_octave-(octave-start_octave)-(1-(7-note)/7), (7-note)%7, -alteration)
+
+print """ </measure>
+ </part>
+</score-partwise>
+"""
--- /dev/null
+#!/usr/bin/env python
+
+notes = "CDEFGAB"
+alterations = [-1, 0, 1]
+
+def print_measure (nr, fifth, mode, atts1 = "", atts = "", final = ""):
+    # Emit one MusicXML measure holding a single half note on C4 with the
+    # given key signature (fifths/mode); atts1/atts/final splice extra
+    # attribute and barline XML in verbatim.
+    print """ <measure number="%s">
+ <attributes>
+%s <key>
+ <fifths>%s</fifths>
+ <mode>%s</mode>
+ </key>
+%s </attributes>
+ <note>
+ <pitch>
+ <step>C</step>
+ <octave>4</octave>
+ </pitch>
+ <duration>2</duration>
+ <voice>1</voice>
+ <type>half</type>
+ </note>
+%s </measure>""" % (nr, atts1, fifth, mode, atts, final)
+
+first_div = """ <divisions>1</divisions>
+"""
+first_atts = """ <time symbol="common">
+ <beats>2</beats>
+ <beat-type>4</beat-type>
+ </time>
+ <clef>
+ <sign>G</sign>
+ <line>2</line>
+ </clef>
+"""
+
+final_barline = """ <barline location="right">
+ <bar-style>light-heavy</bar-style>
+ </barline>
+"""
+
+print """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
+ "http://www.musicxml.org/dtds/partwise.dtd">
+<score-partwise>
+ <movement-title>Different Key signatures</movement-title>
+ <identification>
+ <miscellaneous>
+ <miscellaneous-field name="description">Various key signature: from 11
+ flats to 11 sharps (each one first one measure in major, then one
+ measure in minor)</miscellaneous-field>
+ </miscellaneous>
+ </identification>
+ <part-list>
+ <score-part id="P1">
+ <part-name>MusicXML Part</part-name>
+ </score-part>
+ </part-list>
+ <!--=========================================================-->
+ <part id="P1">"""
+
+max_range = 11
+measure = 0
+# Two measures per fifths value (major then minor), from -11 to +11 flats/
+# sharps; the very first measure also carries divisions/time/clef, the
+# last one the final barline.
+for fifth in range(-max_range, max_range+1):
+    measure += 1
+    if fifth == -max_range:
+        print_measure (measure, fifth, "major", first_div, first_atts)
+    else:
+        print_measure (measure, fifth, "major")
+    measure += 1
+    if fifth == max_range:
+        print_measure (measure, fifth, "minor", "", "", final_barline)
+    else:
+        print_measure (measure, fifth, "minor")
+
+
+print """ </part>
+</score-partwise>"""
--- /dev/null
+#!/usr/bin/env python
+
+notes = "CDEFGAB"
+alterations = [-1, 0, 1]
+
+dot_xml = """ <dot/>
+"""
+tie_xml = """ <tie type="%s"/>
+"""
+tie_notation_xml = """ <notations><tied type="%s"/></notations>
+"""
+
+
+def generate_note (duration, end_tie = False):
+    # Render DURATION (in eighth-note units, divisions=2) as MusicXML:
+    # pick the largest plain note value that fits, add up to two dots, and
+    # tie on a recursively generated second note for any remainder.
+    # end_tie marks this note as ending a tie from its predecessor.
+    if duration < 2:
+        (notetype, dur) = ("8th", 1)
+    elif duration < 4:
+        (notetype, dur) = ("quarter", 2)
+    elif duration < 8:
+        (notetype, dur) = ("half", 4)
+    else:
+        (notetype, dur) = ("whole", 8)
+    dur_processed = dur
+    dot = ""
+    # First dot adds dur/2; a second dot adds dur/4 (integer arithmetic).
+    if (duration - dur_processed >= dur/2):
+        dot = dot_xml
+        dur_processed += dur/2
+        if (duration - dur_processed >= max(dur/4, 1)):
+            dot += dot_xml
+            dur_processed += dur/4
+    tie = ""
+    tie_notation = ""
+    if end_tie:
+        tie += tie_xml % "stop"
+        tie_notation += tie_notation_xml % "stop"
+    second_note = None
+    if duration - dur_processed > 0:
+        # Remainder left over: start a tie into a recursively built note.
+        second_note = generate_note (duration-dur_processed, True)
+        tie += tie_xml % "start"
+        tie_notation += tie_notation_xml % "start"
+    note = """ <note>
+ <pitch>
+ <step>C</step>
+ <octave>5</octave>
+ </pitch>
+ <duration>%s</duration>
+%s <voice>1</voice>
+ <type>%s</type>
+%s%s </note>""" % (dur_processed, tie, notetype, dot, tie_notation)
+    if second_note:
+        return "%s\n%s" % (note, second_note)
+    else:
+        return note
+
+def print_measure (nr, beats, type, params = "", attr = "", attr2 = "", barline = ""):
+    # Emit one MusicXML measure in BEATS/TYPE time, filled by a single
+    # (possibly dotted and/or tied) note spanning the whole measure.
+    # 8*beats/type converts the measure length into eighth-note units.
+    duration = 8*beats/type
+    note = generate_note (duration)
+
+    print """ <measure number="%s">
+ <attributes>
+%s <time%s>
+ <beats>%s</beats>
+ <beat-type>%s</beat-type>
+ </time>
+%s </attributes>
+%s
+%s </measure>""" % (nr, attr, params, beats, type, attr2, note, barline)
+
+first_key = """ <divisions>2</divisions>
+ <key>
+ <fifths>0</fifths>
+ <mode>major</mode>
+ </key>
+"""
+first_clef = """ <clef>
+ <sign>G</sign>
+ <line>2</line>
+ </clef>
+"""
+
+final_barline = """ <barline location="right">
+ <bar-style>light-heavy</bar-style>
+ </barline>
+"""
+
+print """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
+ "http://www.musicxml.org/dtds/partwise.dtd">
+<score-partwise>
+ <identification>
+ <miscellaneous>
+ <miscellaneous-field name="description">Various time signatures: 2/2
+ (alla breve), 4/4 (C), 2/2, 3/2, 2/4, 3/4, 4/4, 5/4, 3/8, 6/8,
+ 12/8</miscellaneous-field>
+ </miscellaneous>
+ </identification>
+ <part-list>
+ <score-part id="P1">
+ <part-name>MusicXML Part</part-name>
+ </score-part>
+ </part-list>
+ <!--=========================================================-->
+ <part id="P1">"""
+
+measure = 1
+
+print_measure (measure, 2, 2, " symbol=\"common\"", first_key, first_clef)
+measure += 1
+
+print_measure (measure, 4, 4, " symbol=\"common\"")
+measure += 1
+
+print_measure (measure, 2, 2)
+measure += 1
+
+print_measure (measure, 3, 2)
+measure += 1
+
+print_measure (measure, 2, 4)
+measure += 1
+
+print_measure (measure, 3, 4)
+measure += 1
+
+print_measure (measure, 4, 4)
+measure += 1
+
+print_measure (measure, 5, 4)
+measure += 1
+
+print_measure (measure, 3, 8)
+measure += 1
+
+print_measure (measure, 6, 8)
+measure += 1
+
+print_measure (measure, 12, 8, "", "", "", final_barline)
+measure += 1
+
+print """ </part>
+</score-partwise>"""
--- /dev/null
+# FontForge script: open font $1, merge kerning from AFM file $2, strip
+# the spurious default ligatures listed below, then write $3<fontname>.otf.
+Open($1);
+MergeKern($2)
+
+
+# The AFM files of `New Century Schoolbook' family as distributed within the
+# urw-fonts-1.0.7pre41.tar.bz2 archive contain a bunch of ligatures which
+# shouldn't be active by default:
+#
+#   T + M -> trademark
+#   N + o -> afii61352
+#   i + j -> ij
+#   I + J -> IJ
+#
+# This font bundle is shipped by Fedora Core 6 and other GNU/Linux
+# distributions; we simply remove those ligatures.
+
+SelectIf("trademark", "trademark", \
+         "afii61352", "afii61352", \
+         "ij", "ij", \
+         "IJ", "IJ");
+# Newer FontForge versions replaced RemoveATT with RemovePosSub.
+if (Strtol($version) < 20070501)
+    RemoveATT("Ligature", "*", "*");
+else
+    RemovePosSub("*");
+endif
+
+Generate($3 + $fontname + ".otf");
+
+# EOF
--- /dev/null
+#!/usr/bin/env python
+import os
+import sys
+
+# Print the canonical absolute path of each command-line argument.
+for i in sys.argv[1:]:
+    print os.path.realpath (i)
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Temporary script that helps translated docs sources conversion
+# for texi2html processing
+
+# USAGE: tely-gettext.py PYTHON-DIR LOCALEDIR LANG FILES
+
+print "tely-gettext.py"
+
+import sys
+import re
+import os
+import gettext
+
+if len (sys.argv) > 3:
+ buildscript_dir, localedir, lang = sys.argv[1:4]
+else:
+ print """USAGE: tely-gettext.py PYTHON-DIR LOCALEDIR LANG FILES
+ For example scripts/aux/tely-gettext.py python/out Documentation/po/out-www de Documentation/de/user/*.tely"""
+ sys.exit (1)
+
+sys.path.append (buildscript_dir)
+import langdefs
+
+double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
+t = gettext.translation('lilypond-doc', localedir, [lang])
+_doc = t.gettext
+
+include_re = re.compile (r'@include (.*?)$', re.M)
+whitespaces = re.compile (r'\s+')
+ref_re = re.compile (r'(?ms)@(ruser|rprogram|ref|rlearning)\{(.*?)\}')
+node_section_re = re.compile (r'@node (.*?)\n@((?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) (.*?)\n')
+menu_entry_re = re.compile (r'\* (.*?)::')
+
+def ref_gettext (m):
+    # Translate the target of a cross-reference, collapsing its whitespace.
+    r = whitespaces.sub (' ', m.group (2))
+    return '@' + m.group (1) + '{' + _doc (r) + '}'
+
+def node_gettext (m):
+    # Translate a @node line plus its sectioning command, and record the
+    # original (English) node name in a @translationof line.
+    return '@node ' + _doc (m.group (1)) + '\n@' + \
+        m.group (2) + ' ' + _doc (m.group (3)) + \
+        '\n@translationof ' + m.group (1) + '\n'
+
+def menu_entry_gettext (m):
+    # Translate one '* name::' menu entry.
+    return '* ' + _doc (m.group (1)) + '::'
+
+def process_file (filename):
+    # Rewrite FILENAME in place: translate node/section titles, cross
+    # references and menu entries via gettext, comment out the skeleton
+    # markers, then recurse into every @include found in the page.
+    print "Processing %s" % filename
+    f = open (filename, 'r')
+    page = f.read ()
+    f.close()
+    page = node_section_re.sub (node_gettext, page)
+    page = ref_re.sub (ref_gettext, page)
+    page = menu_entry_re.sub (menu_entry_gettext, page)
+    page = page.replace ("""-- SKELETON FILE --
+When you actually translate this file, please remove these lines as
+well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", """@c -- SKELETON FILE --""")
+    page = page.replace ('UNTRANSLATED NODE: IGNORE ME', "@c UNTRANSLATED NODE: IGNORE ME")
+    includes = [whitespaces.sub ('', f) for f in include_re.findall (page)]
+    f = open (filename, 'w')
+    f.write (page)
+    f.close ()
+    dir = os.path.dirname (filename)
+    # Only recurse into includes that exist relative to this file's dir.
+    for file in includes:
+        p = os.path.join (dir, file)
+        if os.path.exists (p):
+            process_file (p)
+
+for filename in sys.argv[4:]:
+ process_file (filename)
--- /dev/null
+#!/usr/bin/env python
+# texi-langutils.py
+
+# WARNING: this script can't find files included in a different directory
+
+import sys
+import re
+import getopt
+import os
+
+import langdefs
+
+def read_pipe (command):
+    # Run COMMAND (echoing it first); return its stdout, printing a
+    # diagnostic when the command exits non-zero.
+    # NOTE(review): the only caller uses legacy dashed 'git-rev-parse'.
+    print command
+    pipe = os.popen (command)
+    output = pipe.read ()
+    if pipe.close ():
+        print "pipe failed: %(command)s" % locals ()
+    return output
+
+
+optlist, texi_files = getopt.getopt(sys.argv[1:],'no:d:b:i:l:',['skeleton', 'gettext'])
+process_includes = not ('-n', '') in optlist # -n don't process @include's in texinfo files
+
+make_gettext = ('--gettext', '') in optlist # --gettext generate a node list from a Texinfo source
+make_skeleton = ('--skeleton', '') in optlist # --skeleton extract the node tree from a Texinfo source
+
+output_file = 'doc.pot'
+
+# @untranslated should be defined as a macro in Texinfo source
+node_blurb = '''@untranslated
+'''
+doclang = ''
+head_committish = read_pipe ('git-rev-parse HEAD')
+intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*-
+@c This file is part of %(topfile)s
+@ignore
+ Translation of GIT committish: %(head_committish)s
+ When revising a translation, copy the HEAD committish of the
+ version that you are working on. See TRANSLATION for details.
+@end ignore
+'''
+
+end_blurb = """
+@c -- SKELETON FILE --
+"""
+
+for x in optlist:
+ if x[0] == '-o': # -o NAME set PO output file name to NAME
+ output_file = x[1]
+ elif x[0] == '-d': # -d DIR set working directory to DIR
+ os.chdir (x[1])
+ elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB
+ node_blurb = x[1]
+ elif x[0] == '-i': # -i BLURB set blurb written at beginning of each file to BLURB
+ intro_blurb = x[1]
+ elif x[0] == '-l': # -l ISOLANG set documentlanguage to ISOLANG
+ doclang = '; documentlanguage: ' + x[1]
+
+texinfo_with_menus_re = re.compile (r"^(\*) +([^:\n]+)::.*?$|^@(include|menu|end menu|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.*?)$|@(rglos){(.+?)}", re.M)
+
+texinfo_re = re.compile (r"^@(include|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.+?)$|@(rglos){(.+?)}", re.M)
+
+ly_string_re = re.compile (r'^([a-zA-Z]+)[\t ]*=|%+[\t ]*(.*)$|\\(?:new|context)\s+(?:[a-zA-Z]*?(?:Staff(?:Group)?|Voice|FiguredBass|FretBoards|Names|Devnull))\s+=\s+"?([a-zA-Z]+)"?\s+')
+lsr_verbatim_ly_re = re.compile (r'% begin verbatim$')
+texinfo_verbatim_ly_re = re.compile (r'^@lilypond\[.*?verbatim')
+
+def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile, output_file=None, scan_ly=False):
+    # Process one Texinfo (or .ly) file:
+    #  * when output_file is set, append translatable strings to it in
+    #    xgettext-compatible '_(r"...")' form (node/section titles, @rglos
+    #    entries, and -- for .ly sources -- variable names, comments and
+    #    context ids inside verbatim lilypond blocks);
+    #  * when write_skeleton is true, also write an untranslated skeleton
+    #    copy of the file (basename, in the cwd);
+    #  * recurse into every @include unless -n was given.
+    # I/O errors are reported to stderr instead of aborting the whole run.
+    try:
+        f = open (texifilename, 'r')
+        texifile = f.read ()
+        f.close ()
+        printedfilename = texifilename.replace ('../','')
+        includes = []
+
+        # process ly var names and comments
+        if output_file and (scan_ly or texifilename.endswith ('.ly')):
+            lines = texifile.splitlines ()
+            i = 0
+            in_verb_ly_block = False
+            # LSR snippets mark verbatim sections differently from texinfo.
+            if texifilename.endswith ('.ly'):
+                verbatim_ly_re = lsr_verbatim_ly_re
+            else:
+                verbatim_ly_re = texinfo_verbatim_ly_re
+            for i in range (len (lines)):
+                if verbatim_ly_re.search (lines[i]):
+                    in_verb_ly_block = True
+                elif lines[i].startswith ('@end lilypond'):
+                    in_verb_ly_block = False
+                elif in_verb_ly_block:
+                    for (var, comment, context_id) in ly_string_re.findall (lines[i]):
+                        if var:
+                            output_file.write ('# ' + printedfilename + ':' + \
+                                str (i + 1) + ' (variable)\n_(r"' + var + '")\n')
+                        elif comment:
+                            output_file.write ('# ' + printedfilename + ':' + \
+                                str (i + 1) + ' (comment)\n_(r"' + \
+                                comment.replace ('"', '\\"') + '")\n')
+                        elif context_id:
+                            output_file.write ('# ' + printedfilename + ':' + \
+                                str (i + 1) + ' (context id)\n_(r"' + \
+                                context_id + '")\n')
+
+        # process Texinfo node names and section titles
+        if write_skeleton:
+            g = open (os.path.basename (texifilename), 'w')
+            subst = globals ()
+            subst.update (locals ())
+            g.write (i_blurb % subst)
+            tutu = texinfo_with_menus_re.findall (texifile)
+            node_trigger = False
+            for item in tutu:
+                if item[0] == '*':
+                    g.write ('* ' + item[1] + '::\n')
+                elif output_file and item[4] == 'rglos':
+                    output_file.write ('_(r"' + item[5] + '") # @rglos in ' + printedfilename + '\n')
+                elif item[2] == 'menu':
+                    g.write ('@menu\n')
+                elif item[2] == 'end menu':
+                    g.write ('@end menu\n\n')
+                else:
+                    g.write ('@' + item[2] + ' ' + item[3] + '\n')
+                    # A pending @node gets the untranslated blurb after its
+                    # sectioning command has been written.
+                    if node_trigger:
+                        g.write (n_blurb)
+                        node_trigger = False
+                    elif item[2] == 'include':
+                        includes.append (item[3])
+                    else:
+                        if output_file:
+                            output_file.write ('# @' + item[2] + ' in ' + \
+                                printedfilename + '\n_(r"' + item[3].strip () + '")\n')
+                        if item[2] == 'node':
+                            node_trigger = True
+            g.write (end_blurb)
+            g.close ()
+
+        elif output_file:
+            # No skeleton wanted: only harvest translatable strings.
+            toto = texinfo_re.findall (texifile)
+            for item in toto:
+                if item[0] == 'include':
+                    includes.append(item[1])
+                elif item[2] == 'rglos':
+                    output_file.write ('# @rglos in ' + printedfilename + '\n_(r"' + item[3] + '")\n')
+                else:
+                    output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip () + '")\n')
+
+        if process_includes:
+            dir = os.path.dirname (texifilename)
+            for item in includes:
+                process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb, write_skeleton, topfile, output_file, scan_ly)
+    except IOError, (errno, strerror):
+        sys.stderr.write ("I/O error(%s): %s: %s\n" % (errno, texifilename, strerror))
+
+
+# Pad the blurbs with blank lines only when they are non-empty.
+if intro_blurb != '':
+    intro_blurb += '\n\n'
+if node_blurb != '':
+    node_blurb = '\n' + node_blurb + '\n\n'
+if make_gettext:
+    # --gettext: collect '_(r"...")' strings into node_list, then run
+    # xgettext over it to produce the .pot file.
+    node_list_filename = 'node_list'
+    node_list = open (node_list_filename, 'w')
+    node_list.write ('# -*- coding: utf-8 -*-\n')
+    for texi_file in texi_files:
+        # Ugly: scan ly comments and variable names only in English docs
+        is_english_doc = 'Documentation/user' in texi_file
+        process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
+                      os.path.basename (texi_file), node_list,
+                      scan_ly=is_english_doc)
+    # Also make the fixed texi2html navigation strings translatable.
+    for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'):
+        node_list.write ('_(r"' + word + '")\n')
+    node_list.close ()
+    os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename)
+else:
+    # Skeleton-only mode: no translatable-string harvesting.
+    for texi_file in texi_files:
+        process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
+                      os.path.basename (texi_file))
--- /dev/null
+#!/usr/bin/env python
+# texi-skeleton-update.py
+
+import sys
+import glob
+import os
+import shutil
+
+sys.stderr.write ('texi-skeleton-update.py\n')
+
+# argv[1]: translated-docs dir; argv[2]: freshly generated skeletons dir.
+orig_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[1] + '/*.ite??')])
+new_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[2] + '/*.ite??')])
+
+# Overwrite translated files that are still pristine skeletons, and copy
+# over any skeleton that does not exist yet (except fdl.itexi).
+for f in new_skeletons:
+    if f in orig_skeletons:
+        g = open (os.path.join (sys.argv[1], f), 'r').read ()
+        if '-- SKELETON FILE --' in g:
+            sys.stderr.write ("Updating %s...\n" % f)
+            shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
+    elif f != 'fdl.itexi':
+        sys.stderr.write ("Copying new file %s...\n" % f)
+        shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
+
+# Warn about translated files with no freshly generated counterpart.
+for f in orig_skeletons.difference (new_skeletons):
+    sys.stderr.write ("Warning: outdated skeleton file %s\n" % f)
--- /dev/null
+#!/usr/bin/env python
+
+"""
+USAGE: translations-status.py BUILDSCRIPT-DIR LOCALEDIR
+
+ This script must be run from Documentation/
+
+ Reads template files translations.template.html.in
+and for each LANG in LANGUAGES LANG/translations.template.html.in
+ Writes translations.html.in and for each LANG in LANGUAGES
+translations.LANG.html.in
+ Writes out/translations-status.txt
+ Updates word counts in TRANSLATION
+"""
+
+import sys
+import re
+import string
+import os
+
+import langdefs
+import buildlib
+
+def progress (str):
+    # Log a status line to stderr.
+    sys.stderr.write (str + '\n')
+
+progress ("translations-status.py")
+
+_doc = lambda s: s
+
+# load gettext messages catalogs
+translation = langdefs.translation
+
+
+language_re = re.compile (r'^@documentlanguage (.+)', re.M)
+comments_re = re.compile (r'^@ignore\n(.|\n)*?\n@end ignore$|@c .*?$', re.M)
+space_re = re.compile (r'\s+', re.M)
+lilypond_re = re.compile (r'@lilypond({.*?}|(.|\n)*?\n@end lilypond$)', re.M)
+node_re = re.compile ('^@node .*?$', re.M)
+title_re = re.compile ('^@(top|chapter|(?:sub){0,2}section|' + \
+'(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?) (.*?)$', re.M)
+include_re = re.compile ('^@include (.*?)$', re.M)
+
+translators_re = re.compile (r'^@c\s+Translators\s*:\s*(.*?)$', re.M | re.I)
+checkers_re = re.compile (r'^@c\s+Translation\s*checkers\s*:\s*(.*?)$',
+ re.M | re.I)
+status_re = re.compile (r'^@c\s+Translation\s*status\s*:\s*(.*?)$', re.M | re.I)
+post_gdp_re = re.compile ('post.GDP', re.I)
+untranslated_node_str = '@untranslated'
+skeleton_str = '-- SKELETON FILE --'
+
+section_titles_string = _doc ('Section titles')
+last_updated_string = _doc (' <p><i>Last updated %s</i></p>\n')
+detailed_status_heads = [_doc ('Translators'), _doc ('Translation checkers'),
+ _doc ('Translated'), _doc ('Up to date'),
+ _doc ('Other info')]
+format_table = {
+ 'not translated': {'color':'d0f0f8', 'short':_doc ('no'), 'abbr':'NT',
+ 'long':_doc ('not translated')},
+ 'partially translated': {'color':'dfef77',
+ 'short':_doc ('partially (%(p)d %%)'),
+ 'abbr':'%(p)d%%',
+ 'long':_doc ('partially translated (%(p)d %%)')},
+ 'fully translated': {'color':'1fff1f', 'short':_doc ('yes'), 'abbr':'FT',
+ 'long': _doc ('translated')},
+ 'up to date': {'short':_doc ('yes'), 'long':_doc ('up to date'),
+ 'abbr':'100%%', 'vague':_doc ('up to date')},
+ 'outdated': {'short':_doc ('partially'), 'abbr':'%(p)d%%',
+ 'vague':_doc ('partially up to date')},
+ 'N/A': {'short':_doc ('N/A'), 'abbr':'N/A', 'color':'d587ff', 'vague':''},
+ 'pre-GDP':_doc ('pre-GDP'),
+ 'post-GDP':_doc ('post-GDP')
+}
+
+texi_level = {
+# (Unumbered/Numbered/Lettered, level)
+ 'top': ('u', 0),
+ 'unnumbered': ('u', 1),
+ 'unnumberedsec': ('u', 2),
+ 'unnumberedsubsec': ('u', 3),
+ 'chapter': ('n', 1),
+ 'section': ('n', 2),
+ 'subsection': ('n', 3),
+ 'appendix': ('l', 1)
+}
+
+appendix_number_trans = string.maketrans ('@ABCDEFGHIJKLMNOPQRSTUVWXY',
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
+
class SectionNumber (object):
    """Track and format hierarchical section numbers (e.g. '2.1.3 ').

    State is a stack of [index, type] pairs, one per nesting level,
    where type is 'u' (unnumbered), 'n' (numbered) or 'l' (lettered
    appendix).
    """
    def __init__ (self):
        self.__data = [[0,'u']]

    def __increase_last_index (self):
        type = self.__data[-1][1]
        if type == 'l':
            # lettered appendix: advance the letter via the translation
            # table ('@' is the pre-'A' sentinel start value)
            self.__data[-1][0] = \
                self.__data[-1][0].translate (appendix_number_trans)
        elif type == 'n':
            self.__data[-1][0] += 1

    def format (self):
        # unnumbered sections display no number at all
        if self.__data[-1][1] == 'u':
            return ''
        return '.'.join ([str (i[0]) for i in self.__data if i[1] != 'u']) + ' '

    # Advance numbering for a section of the given (type, level) and
    # return the formatted number string.
    # NOTE(review): the tuple parameter below is Python 2-only syntax.
    def increase (self, (type, level)):
        if level == 0:
            self.__data = [[0,'u']]
        # pop levels deeper than the new section's level
        while level + 1 < len (self.__data):
            del self.__data[-1]
        if level + 1 > len (self.__data):
            self.__data.append ([0, type])
            if type == 'l':
                self.__data[-1][0] = '@'
        if type == self.__data[-1][1]:
            self.__increase_last_index ()
        else:
            # section type changed at the same level: restart numbering
            self.__data[-1] = ([0, type])
            if type == 'l':
                self.__data[-1][0] = 'A'
            elif type == 'n':
                self.__data[-1][0] = 1
        return self.format ()
+
+
def percentage_color (percent):
    """Map PERCENT (0-100) to an 'rrggbb' hex color (red -> orange -> green)."""
    fraction = percent / 100.0
    # Piecewise-linear interpolation between three anchor colors;
    # each pair is (start channel value, end channel value).
    if fraction < 0.33:
        anchors = [(0xff, 0xff), (0x5c, 0xa6), (0x5c, 0x4c)]
        blend = lambda a, b: 3 * fraction * b + (1 - 3 * fraction) * a
    elif fraction < 0.67:
        anchors = [(0xff, 0xff), (0xa6, 0xff), (0x4c, 0x3d)]
        blend = lambda a, b: (3 * fraction - 1) * b + (2 - 3 * fraction) * a
    else:
        anchors = [(0xff, 0x1f), (0xff, 0xff), (0x3d, 0x1f)]
        blend = lambda a, b: (3 * fraction - 2) * b + 3 * (1 - fraction) * a
    return ''.join ([hex (int (blend (a, b)))[2:] for (a, b) in anchors])
+
+
def update_word_count (text, filename, word_count):
    """Replace the word count preceding FILENAME in TEXT by WORD_COUNT.

    Matches lines of the form '<digits> <filename>' and rewrites the
    count left-justified in a 6-character field.
    """
    pattern = r'(?m)^(\d+) *' + filename
    replacement = str (word_count).ljust (6) + filename
    return re.sub (pattern, replacement, text)
+
po_msgid_re = re.compile (r'^msgid "(.*?)"(?:\n"(.*?)")*', re.M)

def po_word_count (po_content):
    """Count words of all msgid strings in a PO file's contents."""
    # Join the (possibly multi-line) msgid fragments, then split on
    # whitespace using the module-level space_re.
    msgids = [''.join (groups) for groups in po_msgid_re.findall (po_content)]
    return len (space_re.split (' '.join (msgids)))
+
sgml_tag_re = re.compile (r'<.*?>', re.S)

def sgml_word_count (sgml_doc):
    """Count words in an SGML/HTML document, ignoring markup tags."""
    stripped = sgml_tag_re.sub ('', sgml_doc)
    return len (space_re.split (stripped))
+
def tely_word_count (tely_doc):
    '''
    Calculate word count of a Texinfo document node by node.

    Take string tely_doc as an argument.
    Return a list of integers, one entry per @node chunk (the first
    entry counts text before the first @node).

    Texinfo comments and @lilypond blocks are not included in word counts.
    '''
    # strip comments and lilypond snippets, then split on @node lines
    tely_doc = comments_re.sub ('', tely_doc)
    tely_doc = lilypond_re.sub ('', tely_doc)
    nodes = node_re.split (tely_doc)
    return [len (space_re.split (n)) for n in nodes]
+
+
class TelyDocument (object):
    """A Texinfo source file: records title, section level, language
    (if declared) and the list of existing @include'd files."""
    def __init__ (self, filename):
        self.filename = filename
        self.contents = open (filename).read ()

        ## record title and sectioning level of first Texinfo section
        m = title_re.search (self.contents)
        if m:
            self.title = m.group (2)
            self.level = texi_level [m.group (1)]
        else:
            self.title = 'Untitled'
            self.level = ('u', 1)

        # self.language is only set when @documentlanguage is present
        m = language_re.search (self.contents)
        if m:
            self.language = m.group (1)

        # resolve @include arguments relative to this file; keep only
        # those that exist on disk
        included_files = [os.path.join (os.path.dirname (filename), t)
                          for t in include_re.findall (self.contents)]
        self.included_files = [p for p in included_files if os.path.exists (p)]

    def print_title (self, section_number):
        # advances SECTION_NUMBER as a side effect
        return section_number.increase (self.level) + self.title
+
+
class TranslatedTelyDocument (TelyDocument):
    """A translated Texinfo file, compared against its English master.

    Computes translation completeness (word-based percentage) and
    up-to-dateness (diff-based percentage against the master), and
    renders HTML/text status fragments.
    """
    def __init__ (self, filename, masterdocument, parent_translation=None):
        TelyDocument.__init__ (self, filename)

        self.masterdocument = masterdocument
        # inherit language from the including (parent) translation when
        # this file has no @documentlanguage of its own
        if not hasattr (self, 'language') \
           and hasattr (parent_translation, 'language'):
            self.language = parent_translation.language
        if hasattr (self, 'language'):
            self.translation = translation[self.language]
        else:
            self.translation = lambda x: x
        self.title = self.translation (self.title)

        ## record authoring information
        m = translators_re.search (self.contents)
        if m:
            self.translators = [n.strip () for n in m.group (1).split (',')]
        else:
            # fall back to the parent translation's translators
            self.translators = parent_translation.translators
        m = checkers_re.search (self.contents)
        if m:
            self.checkers = [n.strip () for n in m.group (1).split (',')]
        elif isinstance (parent_translation, TranslatedTelyDocument):
            self.checkers = parent_translation.checkers
        else:
            self.checkers = []

        ## check whether translation is pre- or post-GDP
        m = status_re.search (self.contents)
        if m:
            self.post_gdp = bool (post_gdp_re.search (m.group (1)))
        else:
            self.post_gdp = False

        ## record which parts (nodes) of the file are actually translated
        self.partially_translated = not skeleton_str in self.contents
        nodes = node_re.split (self.contents)
        self.translated_nodes = [not untranslated_node_str in n for n in nodes]

        ## calculate translation percentage
        # weight each translated node by the master's word count for it
        master_total_word_count = sum (masterdocument.word_count)
        translation_word_count = \
            sum ([masterdocument.word_count[k] * self.translated_nodes[k]
                  for k in range (min (len (masterdocument.word_count),
                                       len (self.translated_nodes)))])
        self.translation_percentage = \
            100 * translation_word_count / master_total_word_count

        ## calculate how much the file is outdated
        (diff_string, error) = \
            buildlib.check_translated_doc (masterdocument.filename, self.filename, self.contents)
        if error:
            sys.stderr.write ('warning: %s: %s' % (self.filename, error))
            self.uptodate_percentage = None
        else:
            # estimate outdatedness from the size of diff insertions and
            # deletions relative to the master document's size
            diff = diff_string.splitlines ()
            insertions = sum ([len (l) - 1 for l in diff
                               if l.startswith ('+')
                               and not l.startswith ('+++')])
            deletions = sum ([len (l) - 1 for l in diff
                              if l.startswith ('-')
                              and not l.startswith ('---')])
            outdateness_percentage = 50.0 * (deletions + insertions) / \
                (masterdocument.size + 0.5 * (deletions - insertions))
            self.uptodate_percentage = 100 - int (outdateness_percentage)
            # clamp implausible values, warning about them
            if self.uptodate_percentage > 100:
                alternative = 50
                progress ("%s: strange uptodateness percentage %d %%, \
setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
                self.uptodate_percentage = alternative
            elif self.uptodate_percentage < 1:
                alternative = 1
                progress ("%s: strange uptodateness percentage %d %%, \
setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
                self.uptodate_percentage = alternative

    def completeness (self, formats=['long'], translated=False):
        """Return {format: label} describing translation completeness."""
        if translated:
            translation = self.translation
        else:
            translation = lambda x: x

        if isinstance (formats, str):
            formats = [formats]
        p = self.translation_percentage
        if p == 0:
            status = 'not translated'
        elif p == 100:
            status = 'fully translated'
        else:
            status = 'partially translated'
        # format strings may reference %(p)d, hence the locals() dict
        return dict ([(f, translation (format_table[status][f]) % locals())
                      for f in formats])

    def uptodateness (self, formats=['long'], translated=False):
        """Return {format: label} describing up-to-dateness; the special
        'color' format yields a percentage-derived hex color."""
        if translated:
            translation = self.translation
        else:
            translation = lambda x: x

        if isinstance (formats, str):
            formats = [formats]
        p = self.uptodate_percentage
        if p == None:
            status = 'N/A'
        elif p == 100:
            status = 'up to date'
        else:
            status = 'outdated'
        l = {}
        for f in formats:
            if f == 'color' and p != None:
                l['color'] = percentage_color (p)
            else:
                l[f] = translation (format_table[status][f]) % locals ()
        return l

    def gdp_status (self):
        # GDP = Grand Documentation Project; translated label
        if self.post_gdp:
            return self.translation (format_table['post-GDP'])
        else:
            return self.translation (format_table['pre-GDP'])

    def short_html_status (self):
        """One <td> cell summarizing this translation (untranslated UI)."""
        s = '        <td>'
        if self.partially_translated:
            s += '<br>\n   '.join (self.translators) + '<br>\n'
            if self.checkers:
                s += '            <small>' + \
                    '<br>\n   '.join (self.checkers) + '</small><br>\n'

        c = self.completeness (['color', 'long'])
        s += '            <span style="background-color: #%(color)s">\
%(long)s</span><br>\n' % c

        if self.partially_translated:
            u = self.uptodateness (['vague', 'color'])
            s += '            <span style="background-color: #%(color)s">\
%(vague)s</span><br>\n' % u

        s += '        </td>\n'
        return s

    def text_status (self):
        # compact plain-text status: completeness, then up-to-dateness
        s = self.completeness ('abbr')['abbr'] + ' '

        if self.partially_translated:
            s += self.uptodateness ('abbr')['abbr'] + ' '
        return s

    def html_status (self, numbering=SectionNumber ()):
        """Detailed HTML table row(s) for this translation, recursing
        into translated includes; wraps in a <table> for the master."""
        if self.title == 'Untitled':
            return ''

        if self.level[1] == 0: # if self is a master document
            s = '''<table align="center" border="2">
 <tr align="center">
  <th>%s</th>''' % self.print_title (numbering)
            s += ''.join (['  <th>%s</th>\n' % self.translation (h)
                           for h in detailed_status_heads])
            s += ' </tr>\n'
            s += ' <tr align="left">\n  <td>%s<br>(%d)</td>\n' \
                % (self.translation (section_titles_string),
                   sum (self.masterdocument.word_count))

        else:
            s = ' <tr align="left">\n  <td>%s<br>(%d)</td>\n' \
                % (self.print_title (numbering),
                   sum (self.masterdocument.word_count))

        if self.partially_translated:
            s += '  <td>' + '<br>\n   '.join (self.translators) + '</td>\n'
            s += '  <td>' + '<br>\n   '.join (self.checkers) + '</td>\n'
        else:
            s += '  <td></td>\n' * 2

        c = self.completeness (['color', 'short'], translated=True)
        s += '  <td><span style="background-color: #%(color)s">\
%(short)s</span></td>\n' % {'color': c['color'],
                           'short': c['short']}

        if self.partially_translated:
            u = self.uptodateness (['short', 'color'], translated=True)
            s += '  <td><span style="background-color: #%(color)s">\
%(short)s</span></td>\n' % {'color': u['color'],
                           'short': u['short']}
        else:
            s += '  <td></td>\n'

        s += '  <td>' + self.gdp_status () + '</td>\n </tr>\n'
        # recurse into includes that exist in this language
        s += ''.join ([i.translations[self.language].html_status (numbering)
                       for i in self.masterdocument.includes
                       if self.language in i.translations])

        if self.level[1] == 0: # if self is a master document
            s += '</table>\n<p></p>\n'
        return s
+
class MasterTelyDocument (TelyDocument):
    """An English master Texinfo document with all its translations.

    Recursively builds MasterTelyDocument objects for @include'd files
    (only when at least one translation of this file exists) and renders
    aggregate HTML/text status tables.
    """
    def __init__ (self,
                  filename,
                  parent_translations=dict ([(lang, None)
                                             for lang in langdefs.LANGDICT])):
        TelyDocument.__init__ (self, filename)
        self.size = len (self.contents)
        self.word_count = tely_word_count (self.contents)
        # a translation lives at LANG/<same relative filename>
        translations = dict ([(lang, os.path.join (lang, filename))
                              for lang in langdefs.LANGDICT])
        self.translations = \
            dict ([(lang,
                    TranslatedTelyDocument (translations[lang],
                                            self, parent_translations.get (lang)))
                   for lang in langdefs.LANGDICT
                   if os.path.exists (translations[lang])])
        if self.translations:
            self.includes = [MasterTelyDocument (f, self.translations)
                             for f in self.included_files]
        else:
            self.includes = []

    def update_word_counts (self, s):
        # rewrite this file's count in S, then recurse into includes
        s = update_word_count (s, self.filename, sum (self.word_count))
        for i in self.includes:
            s = i.update_word_counts (s)
        return s

    def html_status (self, numbering=SectionNumber ()):
        """HTML table with one column per available translation."""
        if self.title == 'Untitled' or not self.translations:
            return ''
        if self.level[1] == 0: # if self is a master document
            s = '''<table align="center" border="2">
 <tr align="center">
  <th>%s</th>''' % self.print_title (numbering)
            s += ''.join ([' <th>%s</th>\n' % l for l in self.translations])
            s += ' </tr>\n'
            s += ' <tr align="left">\n  <td>Section titles<br>(%d)</td>\n' \
                % sum (self.word_count)

        else: # if self is an included file
            s = ' <tr align="left">\n  <td>%s<br>(%d)</td>\n' \
                % (self.print_title (numbering), sum (self.word_count))

        s += ''.join ([t.short_html_status ()
                       for t in self.translations.values ()])
        s += ' </tr>\n'
        s += ''.join ([i.html_status (numbering) for i in self.includes])

        if self.level[1] == 0: # if self is a master document
            s += '</table>\n<p></p>\n'
        return s

    def text_status (self, numbering=SectionNumber (), colspec=[48,12]):
        """Plain-text status table; COLSPEC gives column widths."""
        if self.title == 'Untitled' or not self.translations:
            return ''

        s = ''
        if self.level[1] == 0: # if self is a master document
            s += (self.print_title (numbering) + ' ').ljust (colspec[0])
            s += ''.join (['%s'.ljust (colspec[1]) % l
                           for l in self.translations])
            s += '\n'
            s += ('Section titles (%d)' % \
                      sum (self.word_count)).ljust (colspec[0])

        else:
            s = '%s (%d) ' \
                % (self.print_title (numbering), sum (self.word_count))
            s = s.ljust (colspec[0])

        s += ''.join ([t.text_status ().ljust(colspec[1])
                       for t in self.translations.values ()])
        s += '\n\n'
        s += ''.join ([i.text_status (numbering) for i in self.includes])

        if self.level[1] == 0:
            s += '\n'
        return s
+
+
update_category_word_counts_re = re.compile (r'(?ms)^-(\d+)-(.*?\n)\d+ *total')

counts_re = re.compile (r'(?m)^(\d+) ')

def update_category_word_counts_sub (m):
    """Recompute the 'total' line of one word-count category block.

    Used as the replacement callback for update_category_word_counts_re:
    sums all per-file counts in the block body and rewrites the total,
    left-justified in a 6-character field.
    """
    body = m.group (2)
    total = sum (int (count) for count in counts_re.findall (body))
    return '-%s-%s%stotal' % (m.group (1), body, str (total).ljust (6))
+
+
progress ("Reading documents...")

# collect all master .tely files up to two directories deep
tely_files = \
    buildlib.read_pipe ("find -maxdepth 2 -name '*.tely'")[0].splitlines ()
tely_files.sort ()
master_docs = [MasterTelyDocument (os.path.normpath (filename))
               for filename in tely_files]
# keep only documents having at least one translation
master_docs = [doc for doc in master_docs if doc.translations]

main_status_page = open ('translations.template.html.in').read ()

enabled_languages = [l for l in langdefs.LANGDICT
                     if langdefs.LANGDICT[l].enabled
                     and l != 'en']
lang_status_pages = \
    dict ([(l, open (os.path.join (l, 'translations.template.html.in')). read ())
           for l in enabled_languages])

progress ("Generating status pages...")

# English date for the main page (empty LANG forces the C locale)
date_time = buildlib.read_pipe ('LANG= date -u')[0]

main_status_html = last_updated_string % date_time
main_status_html += '\n'.join ([doc.html_status () for doc in master_docs])

html_re = re.compile ('<html>', re.I)
end_body_re = re.compile ('</body>', re.I)

html_header = '''<html>
<!-- This page is automatically generated by translation-status.py from
translations.template.html.in; DO NOT EDIT !-->'''

# inject the generated-file warning and the status tables into the template
main_status_page = html_re.sub (html_header, main_status_page)

main_status_page = end_body_re.sub (main_status_html + '\n</body>',
                                    main_status_page)

open ('translations.html.in', 'w').write (main_status_page)

# per-language pages, with localized dates and localized table headings
for l in enabled_languages:
    date_time = buildlib.read_pipe ('LANG=%s date -u' % l)[0]
    lang_status_pages[l] = translation[l] (last_updated_string) % date_time + lang_status_pages[l]
    lang_status_page = html_re.sub (html_header, lang_status_pages[l])
    html_status = '\n'.join ([doc.translations[l].html_status ()
                              for doc in master_docs
                              if l in doc.translations])
    lang_status_page = end_body_re.sub (html_status + '\n</body>',
                                        lang_status_page)
    open (os.path.join (l, 'translations.html.in'), 'w').write (lang_status_page)

main_status_txt = '''Documentation translations status
Generated %s
NT = not translated
FT = fully translated

''' % date_time

main_status_txt += '\n'.join ([doc.text_status () for doc in master_docs])

status_txt_file = 'out/translations-status.txt'
progress ("Writing %s..." % status_txt_file)
open (status_txt_file, 'w').write (main_status_txt)

# finally, refresh the word counts recorded in the TRANSLATION file
translation_instructions_file = 'TRANSLATION'
progress ("Updating %s..." % translation_instructions_file)
translation_instructions = open (translation_instructions_file).read ()

for doc in master_docs:
    translation_instructions = doc.update_word_counts (translation_instructions)

for html_file in re.findall (r'(?m)^\d+ *(\S+?\.html\S*?)(?: |$)',
                             translation_instructions):
    word_count = sgml_word_count (open (html_file).read ())
    translation_instructions = update_word_count (translation_instructions,
                                                  html_file,
                                                  word_count)

for po_file in re.findall (r'(?m)^\d+ *(\S+?\.po\S*?)(?: |$)',
                           translation_instructions):
    word_count = po_word_count (open (po_file).read ())
    translation_instructions = update_word_count (translation_instructions,
                                                  po_file,
                                                  word_count)

translation_instructions = \
    update_category_word_counts_re.sub (update_category_word_counts_sub,
                                        translation_instructions)

open (translation_instructions_file, 'w').write (translation_instructions)
--- /dev/null
+#!/usr/bin/env python
+# update-snippets.py
+
+# USAGE: update-snippets.py REFERENCE-DIR TARGET-DIR FILES
+#
+# update ly snippets in TARGET-DIR/FILES with snippets from REFERENCE-DIR/FILES
+#
+# More precisely, each existing FILE in TARGET-DIR is matched to the FILE in
+# REFERENCE-DIR (if the latter does not exist, a warning is given).
+#
+# Shell wildcards expansion is performed on FILES.
+# This script currently supports Texinfo format.
+# Ly snippets preceded with a line containing '@c KEEP LY' in TARGET-DIR/FILES
+# will not be updated.
+# An error occurs if REFERENCE-DIR/FILE and TARGET-DIR/FILE do not have the
+# same snippets count.
+
+import sys
+import os
+import glob
+import re
+
print "update-snippets.py"

# Split on Texinfo comments (@c/@comment lines, @ignore blocks); the
# (?<!@) guard skips escaped '@@c'.  snippet_re matches @lilypond blocks
# in both brace and environment forms.
comment_re = re.compile (r'(?<!@)(@c(?:omment)? .*?\n|^@ignore\n.*?\n@end ignore\n)', re.M | re.S)
snippet_re = re.compile (r'^(@lilypond(?:file)?(?:\[.*?\])?\s*\{.+?\}|@lilypond(?:\[.*?\])?(?:.|\n)+?@end lilypond)', re.M)
+
+
def snippet_split (l):
    """Split each non-comment chunk of L on @lilypond snippet boundaries.

    Chunks produced by comment_re.split (comments, @ignore blocks) are
    kept whole; empty strings are dropped.
    """
    result = []
    for chunk in l:
        if not chunk:
            continue
        if chunk.startswith (('@c ', '@ignore\n', '@comment ')):
            result.append (chunk)
        else:
            result.extend ([piece for piece in snippet_re.split (chunk) if piece])
    return result
+
def count_snippet (l):
    """Return how many chunks in L are @lilypond snippets."""
    return len ([s for s in l if s.startswith ('@lilypond')])
+
def find_next_snippet (l, k):
    """Return the index of the first @lilypond chunk in L at or after K.

    Raises IndexError if there is no further snippet (callers only
    invoke this when a matching snippet is known to exist).
    """
    while True:
        if l[k].startswith ('@lilypond'):
            return k
        k += 1
+
# Global exit status; raised (never lowered) as errors are encountered
# so the script can continue processing remaining files.
exit_code = 0

def update_exit_code (code):
    global exit_code
    exit_code = max (code, exit_code)
+
ref_dir, target_dir = sys.argv [1:3]
file_patterns = sys.argv[3:]

total_snippet_count = 0
changed_snippets_count = 0

for pattern in file_patterns:
    files = glob.glob (os.path.join (target_dir, pattern))
    for file in files:
        # the reference (English) counterpart of this translated file
        ref_file = os.path.join (ref_dir, os.path.basename (file))
        if not os.path.isfile (ref_file):
            sys.stderr.write ("Warning: %s: no such file.\nReference file for %s not found.\n" % (ref_file, file))
            continue
        f = open (file, 'r')
        target_source = comment_re.split (f.read ())
        f.close ()
        # untouched skeletons are handled by texi-skeleton-update.py
        if reduce (lambda x, y: x or y, ['-- SKELETON FILE --' in s for s in target_source]):
            sys.stderr.write ("Skipping skeleton file %s\n" % file)
            continue
        g = open (ref_file, 'r')
        ref_source = comment_re.split (g.read ())
        target_source = snippet_split (target_source)
        ref_source = snippet_split (ref_source)
        if '' in target_source or '' in ref_source:
            # NOTE(review): string exceptions are invalid since Python 2.6;
            # this should raise a real exception class.
            raise "AAAAARGH: unuseful empty string"
        snippet_count = count_snippet (target_source)
        if not snippet_count == count_snippet (ref_source):
            update_exit_code (1)
            sys.stderr.write ("Error: %s and %s have different snippet counts.\n\
Update translation by at least adding a @lilypond block where necessary, then rerun this script.\n" % (ref_file, file))
            continue
        total_snippet_count += snippet_count
        c = 0
        k = -1
        # walk the translated chunks, pairing each snippet with the
        # next snippet in the reference source
        for j in range (len (target_source)):
            if target_source[j].startswith ('@lilypond'):
                k = find_next_snippet (ref_source, k+1)
                # snippets preceded by '@c KEEP LY' are left untouched
                if j > 0 and (not target_source[j-1].startswith ('@c KEEP LY')) and target_source[j] != ref_source[k]:
                    target_source[j] = ref_source[k]
                    c += 1
                    changed_snippets_count += 1
        f = open (file, 'w')
        f.write (''.join (target_source))
        sys.stderr.write ('%s: %d/%d snippets updated\n' % (file, c, snippet_count))

sys.stderr.write ('\nTotal: %d snippets, %d updated snippets.\n' % (total_snippet_count, changed_snippets_count))
sys.exit (exit_code)
--- /dev/null
depth = ../..

# Pull in the stepmake templates for scripts, installation and po files.
STEPMAKE_TEMPLATES=script install po

include $(depth)/make/stepmake.make

# Should we install these? This should be handled by sysadmin or
# packager but if she forgets...
#INSTALLATION_OUT_SUFFIXES=1
#INSTALLATION_OUT_DIR1=$(local_lilypond_datadir)/scripts
#INSTALLATION_OUT_FILES1=$(outdir)/lilypond-login $(outdir)/lilypond-profile

all: $(INSTALLATION_FILES)
+
--- /dev/null
+#!@PYTHON@
+import os
+import sys
+import getopt
+import tempfile
+
+# usage:
def usage ():
    """Print command-line usage to stdout.

    Fix: the original left the %s placeholder unsubstituted, so the
    literal string '%s' was printed instead of the program name.
    The print(...) form is valid in both Python 2 and 3.
    """
    print ('usage: %s [-s style] [-o <outfile>] BIBFILES...' % sys.argv[0])
+
(options, files) = getopt.getopt (sys.argv[1:], 's:o:', [])

# defaults
output = 'bib.html'
style = 'long'

for (o,a) in options:
    if o == '-h' or o == '--help':
        usage ()
        sys.exit (0)
    elif o == '-s' or o == '--style':
        style = a
    elif o == '-o' or o == '--output':
        output = a
    else:
        raise Exception ('unknown option: %s' % o)


# NOTE(review): an unknown style only warns; processing continues with it
# anyway -- confirm whether this should be a hard error.
if style not in ['alpha','index','long','longp','long-pario','short','short-pario','split']:
    sys.stderr.write ("Unknown style \`%s'\n" % style)

# NOTE(review): this rebinds the name 'tempfile', shadowing the module,
# and mktemp() is race-prone; mkstemp/mkdtemp would be safer.
tempfile = tempfile.mktemp ('bib2html')

if not files:
    usage ()
    sys.exit (2)
+
def strip_extension (f, ext):
    """Return F without a trailing EXT; any other extension is kept."""
    root, extension = os.path.splitext (f)
    if extension == ext:
        return root
    return root + extension
+
# Strip .bib extensions: bibtex expects database names without them.
nf = []
for f in files:
    nf.append (strip_extension (f, '.bib'))

files = ','.join (nf)

# Write a minimal .aux file so bibtex formats every entry (\citation{*})
# of the given databases with the requested html-* style.
open (tempfile + '.aux', 'w').write (r'''
\relax
\citation{*}
\bibstyle{html-%(style)s}
\bibdata{%(files)s}''' % vars ())

cmd = "bibtex %s" % tempfile

sys.stdout.write ("Invoking `%s'\n" % cmd)
stat = os.system (cmd)
# Fix: '<>' was removed in Python 3 (and long deprecated in Python 2);
# use '!=' instead.
if stat != 0:
    sys.exit(1)


#TODO: do tex -> html on output

# Copy bibtex's .bbl output to the requested output file.
bbl = open (tempfile + '.bbl').read ()

open (output, 'w').write (bbl)
+
+
def cleanup (tempfile):
    # remove the bibtex work files created next to the temp name
    for a in ['aux','bbl', 'blg']:
        os.unlink (tempfile + '.' + a)

cleanup (tempfile)
--- /dev/null
#!@PYTHON@
# Dump all events of a MIDI file, one per line, skipping the first track.
# Uses LilyPond's own 'midi' parser module.

import sys
import midi

(h,tracks) = midi.parse (open (sys.argv[1]).read ())

# skip track 0
tracks = tracks[1:]

for t in tracks:
    for e in t:
        print e
--- /dev/null
+#!@PYTHON@
+# -*- coding: utf-8 -*-
+# extract_texi_filenames.py
+
+# USAGE: extract_texi_filenames.py [-o OUTDIR] FILES
+#
+# -o OUTDIR specifies that output files should rather be written in OUTDIR
+#
+# Description:
+# This script parses the .texi file given and creates a file with the
+# nodename <=> filename/anchor map.
+# The idea behind: Unnumbered subsections go into the same file as the
+# previous numbered section, @translationof gives the original node name,
+# which is then used for the filename/anchor.
+#
+# If this script is run on a file texifile.texi, it produces a file
+# texifile[.LANG].xref-map with tab-separated entries of the form
+# NODE\tFILENAME\tANCHOR
+# LANG is the document language in case it's not 'en'
+# Note: The filename does not have any extension appended!
+# This file can then be used by our texi2html init script to determine
+# the correct file name and anchor for external refs
+
+import sys
+import re
+import os
+import getopt
+
optlist, args = getopt.getopt (sys.argv[1:],'o:')
files = args

# -o OUTDIR: directory where .xref-map files are written (default '.')
outdir = '.'
for x in optlist:
    if x[0] == '-o':
        outdir = x[1]

# if a plain file is in the way of the output directory, replace it
if not os.path.isdir (outdir):
    if os.path.exists (outdir):
        os.unlink (outdir)
    os.makedirs (outdir)

# @include lines, excluding the generated ../lily-* snippet includes
include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M)
whitespaces = re.compile (r'\s+')
# matches @node lines and all sectioning/heading commands, capturing
# (command, argument)
section_translation_re = re.compile ('^@(node|(?:unnumbered|appendix)\
(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|\
(?:major|chap|(?:sub){0,2})heading|translationof) (.*?)\\s*$', re.MULTILINE)
+
def expand_includes (m, filename):
    # Replacement callback for include_re: inline the section list of the
    # included file (resolved relative to FILENAME), or warn if missing.
    filepath = os.path.join (os.path.dirname (filename), m.group(1)) + '.texi'
    if os.path.exists (filepath):
        return extract_sections (filepath)[1]
    else:
        print "Unable to locate include file " + filepath
        return ''
+
lang_re = re.compile (r'^@documentlanguage (.+)', re.M)

def extract_sections (filename):
    """Return (lang_suffix, flat list of '@command arg' section lines)
    for FILENAME, with @include files expanded recursively."""
    result = ''
    f = open (filename, 'r')
    page = f.read ()
    f.close()
    # Search document language
    m = lang_re.search (page)
    if m and m.group (1) != 'en':
        lang_suffix = '.' + m.group (1)
    else:
        lang_suffix = ''
    # Replace all includes by their list of sections and extract all sections
    page = include_re.sub (lambda m: expand_includes (m, filename), page)
    sections = section_translation_re.findall (page)
    for sec in sections:
        result += "@" + sec[0] + " " + sec[1] + "\n"
    return (lang_suffix, result)
+
# Convert a given node name to its proper file name (normalization as explained
# in the texinfo manual:
# http://www.gnu.org/software/texinfo/manual/texinfo/html_node/HTML-Xref-Node-Name-Expansion.html
def texinfo_file_name (title):
    """Normalize a Texinfo node TITLE to the file name texi2html uses."""
    # exception: The top node is always mapped to index.html
    if title == "Top":
        return "index"
    # File name normalization by texinfo (described in the texinfo manual):
    # 1/2: letters and numbers are left unchanged
    # 3/4: multiple, leading and trailing whitespace is removed
    # (split/join is equivalent to strip() + collapsing runs of \s+)
    title = ' '.join (title.split ())
    # 5: all remaining spaces are converted to '-'
    # 6: all other 7- or 8-bit chars are replaced by _xxxx (xxxx=ascii character code)
    result = ''
    for char in title:
        if char == ' ': # space -> '-'
            result += '-'
        elif ( ('0' <= char and char <= '9' ) or
               ('A' <= char and char <= 'Z' ) or
               ('a' <= char and char <= 'z' ) ): # number or letter
            result += char
        else:
            ccode = ord (char)
            if ccode <= 0xFFFF:
                result += "_%04x" % ccode
            else:
                result += "__%06x" % ccode
    # 7: if name begins with a digit, prepend 't_g' so it starts with a letter.
    # Fix: the original used range (ord ('0'), ord ('9')), whose upper bound
    # is exclusive, so titles starting with '9' were left unprefixed.
    if result and '0' <= result[0] <= '9':
        result = 't_g' + result
    return result
+
texinfo_re = re.compile (r'@.*{(.*)}')

def remove_texinfo (title):
    """Replace '@command{arg}' markup in TITLE by just the braced arg."""
    stripped = texinfo_re.sub (r'\1', title)
    return stripped
+
def create_texinfo_anchor (title):
    # anchor = normalized file name of the markup-stripped title
    return texinfo_file_name (remove_texinfo (title))
+
unnumbered_re = re.compile (r'unnumbered.*')
def process_sections (filename, lang_suffix, page):
    """Write a NODE\\tFILENAME\\tANCHOR map for every node in PAGE to
    OUTDIR/basename[LANG_SUFFIX].xref-map.

    Walks the extracted section list, caching the current node's title,
    file name and anchor, and flushing the cache whenever a new node
    (or a node-less section) starts.
    """
    sections = section_translation_re.findall (page)
    basename = os.path.splitext (os.path.basename (filename))[0]
    p = os.path.join (outdir, basename) + lang_suffix + '.xref-map'
    f = open (p, 'w')

    this_title = ''
    this_filename = 'index'
    this_anchor = ''
    this_unnumbered = False
    had_section = False
    for sec in sections:
        if sec[0] == "node":
            # Write out the cached values to the file and start a new section:
            if this_title != '' and this_title != 'Top':
                f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
            had_section = False
            this_title = remove_texinfo (sec[1])
            this_anchor = create_texinfo_anchor (sec[1])
        elif sec[0] == "translationof":
            anchor = create_texinfo_anchor (sec[1])
            # If @translationof is used, it gives the original node name, which
            # we use for the anchor and the file name (if it is a numbered node)
            this_anchor = anchor
            if not this_unnumbered:
                this_filename = anchor
        else:
            # Some pages might not use a node for every section, so treat this
            # case here, too: If we already had a section and encounter another
            # one before the next @node, we write out the old one and start
            # with the new values
            if had_section and this_title != '':
                f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
            this_title = remove_texinfo (sec[1])
            this_anchor = create_texinfo_anchor (sec[1])
            had_section = True

            # unnumbered nodes use the previously used file name, only numbered
            # nodes get their own filename! However, top-level @unnumbered
            # still get their own file.
            this_unnumbered = unnumbered_re.match (sec[0])
            if not this_unnumbered or sec[0] == "unnumbered":
                this_filename = this_anchor

    # flush the final cached node
    if this_title != '' and this_title != 'Top':
        f.write (this_title + "\t" + this_filename + "\t" + this_anchor + "\n")
    f.close ()
+
+
# Process each file given on the command line.
for filename in files:
    print "extract_texi_filenames.py: Processing %s" % filename
    (lang_suffix, sections) = extract_sections (filename)
    process_sections (filename, lang_suffix, sections)
--- /dev/null
+#!@PYTHON@
+import sys
+import getopt
+import re
+import os
+
(options, files) = \
    getopt.getopt (sys.argv[1:],
                   '',
                   ['dir='])


# --dir=OUTDIR: where generated files are written
outdir = ''
for opt in options:
    o = opt[0]
    a = opt[1]
    if o == '--dir':
        outdir = a
    else:
        print o
        raise getopt.error

# Ugh
# For each design size, emit a FontForge .pe script that merges the
# feta/parmesan/feta-alphabet subfonts into one Emmentaler font, plus
# the auxiliary .subfonts, .dep and .fontname files.
for design_size in [11,13,14,16,18,20,23,26]:
    name = 'Emmentaler'
    filename = name.lower ()
    script = '''#!@FONTFORGE@

New();

# Separate Feta versioning?
# * using 20 as Weight works for gnome-font-select widget: gfs

notice = "";
notice += "This font is distributed under the GNU General Public License. ";
notice += "As a special exception, if you create a document which uses ";
notice += "this font, and embed this font or unaltered portions of this ";
notice += "font into the document, this font does not by itself cause the ";
notice += "resulting document to be covered by the GNU General Public License.";;

SetFontNames("%(name)s-%(design_size)d", "%(name)s", "%(name)s %(design_size)d", "%(design_size)d", notice, "@TOPLEVEL_VERSION@");

MergeFonts("feta%(design_size)d.pfb");
MergeFonts("parmesan%(design_size)d.pfb");

# load nummer/din after setting PUA.
i = 0;
while (i < CharCnt())
 Select(i);
# crashes fontforge, use PUA for now -- jcn
# SetUnicodeValue(i + 0xF0000, 0);
/*
PRIVATE AREA
   In the BMP, the range 0xe000 to 0xf8ff will never be assigned to any
   characters by the standard and is reserved for private usage. For the
   Linux community, this private area has been subdivided further into the
   range 0xe000 to 0xefff which can be used individually by any end-user
   and the Linux zone in the range 0xf000 to 0xf8ff where extensions are
   coordinated among all Linux users. The registry of the characters
   assigned to the Linux zone is currently maintained by H. Peter Anvin
   <Peter.Anvin@linux.org>.
*/
 SetUnicodeValue(i + 0xE000, 0);
 ++i;
endloop


MergeFonts("feta-alphabet%(design_size)d.pfb");
MergeKern("feta-alphabet%(design_size)d.tfm");

LoadTableFromFile("LILF", "%(filename)s-%(design_size)d.subfonts");
LoadTableFromFile("LILC", "feta%(design_size)d.otf-table");
LoadTableFromFile("LILY", "feta%(design_size)d.otf-gtable");

Generate("%(filename)s-%(design_size)d.otf");
Generate("%(filename)s-%(design_size)d.svg");
''' % vars()

    basename = '%s-%d' % (filename, design_size)
    path = os.path.join (outdir, basename + '.pe')
    open (path, 'w').write (script)

    # list of subfont names loaded by the LILF table
    subfonts = ['feta%(design_size)d',
                'parmesan%(design_size)d',
                'feta-alphabet%(design_size)d']

    ns = []
    for s in subfonts:
        ns.append ('%s' % (s % vars()))

    subfonts_str = ' '.join (ns)

    open (os.path.join (outdir, '%(filename)s-%(design_size)d.subfonts' % vars()), 'w').write (subfonts_str)

    # makefile dependency fragment for this design size
    path = os.path.join (outdir, '%s-%d.dep' % (filename, design_size))

    deps = r'''%(filename)s-%(design_size)d.otf: $(outdir)/feta%(design_size)d.pfa \
 $(outdir)/parmesan%(design_size)d.pfa  \
 $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-table \
 $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-gtable
''' % vars()
    open (path, 'w').write (deps)

    open (os.path.join (outdir, basename + '.fontname'), 'w').write ("%s-%d" % (name, design_size))
--- /dev/null
+#!@PYTHON@
+# Build a multi-resolution Windows .ico file from one input image.
+# Usage: <script> INPUT-IMAGE OUTPUT-FILE
+# Renders the input at 48/32/16 px in 24- and 8-bit depth with
+# ImageMagick's `convert', then bundles the PNGs with `icotool'.
+import os
+import sys
+import tempfile
+
+# Base name of the input file, without directory or extension.
+base = os.path.splitext (os.path.split (sys.argv[1])[1])[0]
+input = os.path.abspath (sys.argv[1])
+output = os.path.abspath (sys.argv[2])
+program_name= os.path.split (sys.argv[0])[1]
+
+# NOTE(review): tempfile.mktemp is race-prone; the mkdir immediately
+# after at least fails loudly if the path already exists.
+dir = tempfile.mktemp (program_name)
+os.mkdir (dir, 0777)
+os.chdir(dir)
+
+# Echo a shell command, run it, and abort on non-zero exit status.
+def system (c):
+    print c
+    if os.system (c):
+        raise 'barf'
+
+outputs = []
+# One PNG per (size, depth) combination.
+for sz in [48,32,16] :
+
+    for depth in [24,8]:
+        out = '%(base)s-%(sz)d-%(depth)d.png' % locals()
+        system ('convert -depth %(depth)d -sample %(sz)d %(input)s %(out)s' %
+            locals ())
+        outputs.append (out)
+
+# Combine all renderings into a single .ico, then clean up the temp dir.
+system('icotool --output %s --create %s' % (output, ' '.join (outputs)))
+system('rm -rf %(dir)s' % locals())
+
--- /dev/null
+#!@BASH@
+# note: dash does not work
+
+# Bump copyright years across the tree using pytt (a Python sed-like
+# search-and-replace helper):
+#   1. turn bare "2007" copyright years into the range "2007--2008";
+#   2. extend existing ranges ending in 2000..2007 to end in 2008.
+# Files under build-output/VCS directories (out/, out-www/, .git/, ...)
+# and ChangeLog-like files are excluded from the candidate list.
+pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*2007' '\1 2007--2008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change')
+pytt '(Copyright|\(c\)|\(C\)|@copyright\{\})\s*([^-]*--)(200[0-7])' '\1 \2\062008' $(find . -mindepth 2 -type f | grep -Ev 'out/|out-scons|out-www/|.git/|.scon|#|~' | grep -iv 'change')
--- /dev/null
+#!@PERL@ -w
+
+# Generate a short man page from --help and --version output.
+# Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software
+# Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+# Written by Brendan O'Dea <bod@debian.org>
+# Available from ftp://ftp.gnu.org/gnu/help2man/
+
+use 5.005;
+use strict;
+use Getopt::Long;
+use Text::Tabs qw(expand);
+use POSIX qw(strftime setlocale LC_TIME);
+
+my $this_program = 'help2man';
+my $this_version = '1.28';
+my $version_info = <<EOT;
+GNU $this_program $this_version
+
+Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+Written by Brendan O'Dea <bod\@debian.org>
+EOT
+
+my $help_info = <<EOT;
+`$this_program' generates a man page out of `--help' and `--version' output.
+
+Usage: $this_program [OPTIONS]... EXECUTABLE
+
+ -n, --name=STRING description for the NAME paragraph
+ -s, --section=SECTION section number for manual page (1, 6, 8)
+ -m, --manual=TEXT name of manual (User Commands, ...)
+ -S, --source=TEXT source of program (FSF, Debian, ...)
+ -i, --include=FILE include material from `FILE'
+ -I, --opt-include=FILE include material from `FILE' if it exists
+ -o, --output=FILE send output to `FILE'
+ -p, --info-page=TEXT name of Texinfo manual
+ -N, --no-info suppress pointer to Texinfo manual
+ --help print this help, then exit
+ --version print version number, then exit
+
+EXECUTABLE should accept `--help' and `--version' options although
+alternatives may be specified using:
+
+ -h, --help-option=STRING help option string
+ -v, --version-option=STRING version option string
+
+Report bugs to <bug-help2man\@gnu.org>.
+EOT
+
+my $section = 1;
+my $manual = '';
+my $source = '';
+my $help_option = '--help';
+my $version_option = '--version';
+my ($opt_name, @opt_include, $opt_output, $opt_info, $opt_no_info);
+
+my %opt_def = (
+ 'n|name=s' => \$opt_name,
+ 's|section=s' => \$section,
+ 'm|manual=s' => \$manual,
+ 'S|source=s' => \$source,
+ 'i|include=s' => sub { push @opt_include, [ pop, 1 ] },
+ 'I|opt-include=s' => sub { push @opt_include, [ pop, 0 ] },
+ 'o|output=s' => \$opt_output,
+ 'p|info-page=s' => \$opt_info,
+ 'N|no-info' => \$opt_no_info,
+ 'h|help-option=s' => \$help_option,
+ 'v|version-option=s' => \$version_option,
+);
+
+# Parse options.
+Getopt::Long::config('bundling');
+GetOptions (%opt_def,
+ help => sub { print $help_info; exit },
+ version => sub { print $version_info; exit },
+) or die $help_info;
+
+die $help_info unless @ARGV == 1;
+
+my %include = ();
+my %append = ();
+my @include = (); # retain order given in include file
+
+# Process include file (if given). Format is:
+#
+# [section name]
+# verbatim text
+#
+# or
+#
+# /pattern/
+# verbatim text
+#
+
+while (@opt_include)
+{
+ my ($inc, $required) = @{shift @opt_include};
+
+ next unless -f $inc or $required;
+ die "$this_program: can't open `$inc' ($!)\n"
+ unless open INC, $inc;
+
+ my $key;
+ my $hash = \%include;
+
+ while (<INC>)
+ {
+ # [section]
+ if (/^\[([^]]+)\]/)
+ {
+ $key = uc $1;
+ $key =~ s/^\s+//;
+ $key =~ s/\s+$//;
+ $hash = \%include;
+ push @include, $key unless $include{$key};
+ next;
+ }
+
+ # /pattern/
+ if (m!^/(.*)/([ims]*)!)
+ {
+ my $pat = $2 ? "(?$2)$1" : $1;
+
+ # Check pattern.
+ eval { $key = qr($pat) };
+ if ($@)
+ {
+ $@ =~ s/ at .*? line \d.*//;
+ die "$inc:$.:$@";
+ }
+
+ $hash = \%append;
+ next;
+ }
+
+ # Check for options before the first section--anything else is
+ # silently ignored, allowing the first for comments and
+ # revision info.
+ unless ($key)
+ {
+ # handle options
+ if (/^-/)
+ {
+ local @ARGV = split;
+ GetOptions %opt_def;
+ }
+
+ next;
+ }
+
+ $hash->{$key} ||= '';
+ $hash->{$key} .= $_;
+ }
+
+ close INC;
+
+ die "$this_program: no valid information found in `$inc'\n"
+ unless $key;
+}
+
+# Compress trailing blank lines.
+for my $hash (\(%include, %append))
+{
+ for (keys %$hash) { $hash->{$_} =~ s/\n+$/\n/ }
+}
+
+# Turn off localisation of executable's output.
+@ENV{qw(LANGUAGE LANG LC_ALL)} = ('C') x 3;
+
+# Turn off localisation of date (for strftime).
+setlocale LC_TIME, 'C';
+
+# Grab help and version info from executable.
+my ($help_text, $version_text) = map {
+ join '', map { s/ +$//; expand $_ } `$ARGV[0] $_ 2>/dev/null`
+ or die "$this_program: can't get `$_' info from $ARGV[0]\n"
+} $help_option, $version_option;
+
+my $date = strftime "%B %Y", localtime;
+(my $program = $ARGV[0]) =~ s!.*/!!;
+my $package = $program;
+my $version;
+
+if ($opt_output)
+{
+ unlink $opt_output
+ or die "$this_program: can't unlink $opt_output ($!)\n"
+ if -e $opt_output;
+
+ open STDOUT, ">$opt_output"
+ or die "$this_program: can't create $opt_output ($!)\n";
+}
+
+# The first line of the --version information is assumed to be in one
+# of the following formats:
+#
+# <version>
+# <program> <version>
+# {GNU,Free} <program> <version>
+# <program> ({GNU,Free} <package>) <version>
+# <program> - {GNU,Free} <package> <version>
+#
+# and seperated from any copyright/author details by a blank line.
+
+($_, $version_text) = split /\n+/, $version_text, 2;
+
+if (/^(\S+) +\(((?:GNU|Free) +[^)]+)\) +(.*)/ or
+ /^(\S+) +- *((?:GNU|Free) +\S+) +(.*)/)
+{
+ $program = $1;
+ $package = $2;
+ $version = $3;
+}
+elsif (/^((?:GNU|Free) +)?(\S+) +(.*)/)
+{
+ $program = $2;
+ $package = $1 ? "$1$2" : $2;
+ $version = $3;
+}
+else
+{
+ $version = $_;
+}
+
+$program =~ s!.*/!!;
+
+# No info for `info' itself.
+$opt_no_info = 1 if $program eq 'info';
+
+# --name overrides --include contents.
+$include{NAME} = "$program \\- $opt_name\n" if $opt_name;
+
+# Default (useless) NAME paragraph.
+$include{NAME} ||= "$program \\- manual page for $program $version\n";
+
+# Man pages traditionally have the page title in caps.
+my $PROGRAM = uc $program;
+
+# Set default page head/footers
+$source ||= "$program $version";
+unless ($manual)
+{
+ for ($section)
+ {
+ if (/^(1[Mm]|8)/) { $manual = 'System Administration Utilities' }
+ elsif (/^6/) { $manual = 'Games' }
+ else { $manual = 'User Commands' }
+ }
+}
+
+# Extract usage clause(s) [if any] for SYNOPSIS.
+if ($help_text =~ s/^Usage:( +(\S+))(.*)((?:\n(?: {6}\1| *or: +\S).*)*)//m)
+{
+ my @syn = $2 . $3;
+
+ if ($_ = $4)
+ {
+ s/^\n//;
+ for (split /\n/) { s/^ *(or: +)?//; push @syn, $_ }
+ }
+
+ my $synopsis = '';
+ for (@syn)
+ {
+ $synopsis .= ".br\n" if $synopsis;
+ s!^\S*/!!;
+ s/^(\S+) *//;
+ $synopsis .= ".B $1\n";
+ s/\s+$//;
+ s/(([][]|\.\.+)+)/\\fR$1\\fI/g;
+ s/^/\\fI/ unless s/^\\fR//;
+ $_ .= '\fR';
+ s/(\\fI)( *)/$2$1/g;
+ s/\\fI\\fR//g;
+ s/^\\fR//;
+ s/\\fI$//;
+ s/^\./\\&./;
+
+ $synopsis .= "$_\n";
+ }
+
+ $include{SYNOPSIS} ||= $synopsis;
+}
+
+# Process text, initial section is DESCRIPTION.
+my $sect = 'DESCRIPTION';
+$_ = "$help_text\n\n$version_text";
+
+# Normalise paragraph breaks.
+s/^\n+//;
+s/\n*$/\n/;
+s/\n\n+/\n\n/g;
+
+# Temporarily exchange leading dots, apostrophes and backslashes for
+# tokens.
+s/^\./\x80/mg;
+s/^'/\x81/mg;
+s/\\/\x82/g;
+
+# Start a new paragraph (if required) for these.
+s/([^\n])\n(Report +bugs|Email +bug +reports +to|Written +by)/$1\n\n$2/g;
+
+sub convert_option;
+
+while (length)
+{
+ # Convert some standard paragraph names.
+ if (s/^(Options|Examples): *\n//)
+ {
+ $sect = uc $1;
+ next;
+ }
+
+ # Copyright section
+ if (/^Copyright +[(\xa9]/)
+ {
+ $sect = 'COPYRIGHT';
+ $include{$sect} ||= '';
+ $include{$sect} .= ".PP\n" if $include{$sect};
+
+ my $copy;
+ ($copy, $_) = split /\n\n/, $_, 2;
+
+ for ($copy)
+ {
+ # Add back newline
+ s/\n*$/\n/;
+
+ # Convert iso9959-1 copyright symbol or (c) to nroff
+ # character.
+ s/^Copyright +(?:\xa9|\([Cc]\))/Copyright \\(co/mg;
+
+ # Insert line breaks before additional copyright messages
+ # and the disclaimer.
+ s/(.)\n(Copyright |This +is +free +software)/$1\n.br\n$2/g;
+
+ # Join hyphenated lines.
+ s/([A-Za-z])-\n */$1/g;
+ }
+
+ $include{$sect} .= $copy;
+ $_ ||= '';
+ next;
+ }
+
+ # Catch bug report text.
+ if (/^(Report +bugs|Email +bug +reports +to) /)
+ {
+ $sect = 'REPORTING BUGS';
+ }
+
+ # Author section.
+ elsif (/^Written +by/)
+ {
+ $sect = 'AUTHOR';
+ }
+
+ # Examples, indicated by an indented leading $, % or > are
+ # rendered in a constant width font.
+ if (/^( +)([\$\%>] )\S/)
+ {
+ my $indent = $1;
+ my $prefix = $2;
+ my $break = '.IP';
+ $include{$sect} ||= '';
+ while (s/^$indent\Q$prefix\E(\S.*)\n*//)
+ {
+ $include{$sect} .= "$break\n\\f(CW$prefix$1\\fR\n";
+ $break = '.br';
+ }
+
+ next;
+ }
+
+ my $matched = '';
+ $include{$sect} ||= '';
+
+ # Sub-sections have a trailing colon and the second line indented.
+ if (s/^(\S.*:) *\n / /)
+ {
+ $matched .= $& if %append;
+ $include{$sect} .= qq(.SS "$1"\n);
+ }
+
+ my $indent = 0;
+ my $content = '';
+
+ # Option with description.
+ if (s/^( {1,10}([+-]\S.*?))(?:( +(?!-))|\n( {20,}))(\S.*)\n//)
+ {
+ $matched .= $& if %append;
+ $indent = length ($4 || "$1$3");
+ $content = ".TP\n\x83$2\n\x83$5\n";
+ unless ($4)
+ {
+ # Indent may be different on second line.
+ $indent = length $& if /^ {20,}/;
+ }
+ }
+
+ # Option without description.
+ elsif (s/^ {1,10}([+-]\S.*)\n//)
+ {
+ $matched .= $& if %append;
+ $content = ".HP\n\x83$1\n";
+ $indent = 80; # not continued
+ }
+
+ # Indented paragraph with tag.
+ elsif (s/^( +(\S.*?) +)(\S.*)\n//)
+ {
+ $matched .= $& if %append;
+ $indent = length $1;
+ $content = ".TP\n\x83$2\n\x83$3\n";
+ }
+
+ # Indented paragraph.
+ elsif (s/^( +)(\S.*)\n//)
+ {
+ $matched .= $& if %append;
+ $indent = length $1;
+ $content = ".IP\n\x83$2\n";
+ }
+
+ # Left justified paragraph.
+ else
+ {
+ s/(.*)\n//;
+ $matched .= $& if %append;
+ $content = ".PP\n" if $include{$sect};
+ $content .= "$1\n";
+ }
+
+ # Append continuations.
+ while (s/^ {$indent}(\S.*)\n//)
+ {
+ $matched .= $& if %append;
+ $content .= "\x83$1\n"
+ }
+
+ # Move to next paragraph.
+ s/^\n+//;
+
+ for ($content)
+ {
+ # Leading dot and apostrophe protection.
+ s/\x83\./\x80/g;
+ s/\x83'/\x81/g;
+ s/\x83//g;
+
+ # Convert options.
+ s/(^| )(-[][\w=-]+)/$1 . convert_option $2/mge;
+ }
+
+ # Check if matched paragraph contains /pat/.
+ if (%append)
+ {
+ for my $pat (keys %append)
+ {
+ if ($matched =~ $pat)
+ {
+ $content .= ".PP\n" unless $append{$pat} =~ /^\./;
+ $content .= $append{$pat};
+ }
+ }
+ }
+
+ $include{$sect} .= $content;
+}
+
+# Refer to the real documentation.
+unless ($opt_no_info)
+{
+ my $info_page = $opt_info || $program;
+
+ $sect = 'SEE ALSO';
+ $include{$sect} ||= '';
+ $include{$sect} .= ".PP\n" if $include{$sect};
+ $include{$sect} .= <<EOT;
+The full documentation for
+.B $program
+is maintained as a Texinfo manual. If the
+.B info
+and
+.B $program
+programs are properly installed at your site, the command
+.IP
+.B info $info_page
+.PP
+should give you access to the complete manual.
+EOT
+}
+
+# Output header.
+print <<EOT;
+.\\" DO NOT MODIFY THIS FILE! It was generated by $this_program $this_version.
+.TH $PROGRAM "$section" "$date" "$source" "$manual"
+EOT
+
+# Section ordering.
+my @pre = qw(NAME SYNOPSIS DESCRIPTION OPTIONS EXAMPLES);
+my @post = ('AUTHOR', 'REPORTING BUGS', 'COPYRIGHT', 'SEE ALSO');
+my $filter = join '|', @pre, @post;
+
+# Output content.
+for (@pre, (grep ! /^($filter)$/o, @include), @post)
+{
+ if ($include{$_})
+ {
+ my $quote = /\W/ ? '"' : '';
+ print ".SH $quote$_$quote\n";
+
+ for ($include{$_})
+ {
+ # Replace leading dot, apostrophe and backslash tokens.
+ s/\x80/\\&./g;
+ s/\x81/\\&'/g;
+ s/\x82/\\e/g;
+ print;
+ }
+ }
+}
+
+exit;
+
+# Convert option dashes to \- to stop nroff from hyphenating 'em, and
+# embolden. Option arguments get italicised.
+sub convert_option
+{
+    # Work on a private copy; start with \fB so the option name is bold.
+    local $_ = '\fB' . shift;
+
+    s/-/\\-/g;
+    # "--opt[=ARG]": roman brackets around an italic argument.
+    unless (s/\[=(.*)\]$/\\fR[=\\fI$1\\fR]/)
+    {
+	s/=(.)/\\fR=\\fI$1/;
+	s/ (.)/ \\fI$1/;
+	$_ .= '\fR';
+    }
+
+    $_;
+}
--- /dev/null
+#!@PYTHON@
+# html-gettext.py
+
+# USAGE: html-gettext.py [-o OUTDIR] LANG FILES
+#
+# -o OUTDIR specifies that output files should be written in OUTDIR
+# rather than be overwritten
+#
+
+import sys
+import re
+import os
+import getopt
+
+import langdefs
+
+optlist, args = getopt.getopt(sys.argv[1:],'o:')
+lang = args[0]
+files = args [1:]
+
+outdir = '.'
+for x in optlist:
+ if x[0] == '-o':
+ outdir = x[1]
+
+double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
+my_gettext = langdefs.translation[lang]
+
+html_codes = ((' -- ', ' – '),
+ (' --- ', ' — '),
+ ("'", '’'))
+texi_html_conversion = {
+ 'command': {
+ 'html2texi':
+ (re.compile (r'(?:<samp><span class="command">|<code>)(.*?)(?:</span></samp>|</code>)'),
+ r'@command{\1}'),
+ 'texi2html':
+ (re.compile (r'@command{(.*?)}'),
+ r'<code>\1</code>'),
+ },
+ 'code': {
+ 'html2texi':
+ (re.compile (r'<code>(.*?)</code>'),
+ r'@code{\1}'),
+ 'texi2html':
+ (re.compile (r'@code{(.*?)}'),
+ r'<code>\1</code>'),
+ },
+ }
+
+whitespaces = re.compile (r'\s+')
+
+
+def _ (s):
+    # Translate an HTML snippet via gettext:
+    #  1. collapse whitespace and map HTML entities back to plain text;
+    #  2. rewrite HTML markup to Texinfo (e.g. <code> -> @code{}) so the
+    #     string matches msgids extracted from the Texinfo sources;
+    #  3. look up the translation, then convert back to HTML.
+    # Note: the gettext lookup runs once per conversion-table entry.
+    if not s:
+        return ''
+    str = whitespaces.sub (' ', s)
+    for c in html_codes:
+        str = str.replace (c[1], c[0])
+    for command in texi_html_conversion:
+        d = texi_html_conversion[command]
+        str = d['html2texi'][0].sub (d['html2texi'][1], str)
+        str = my_gettext (str)
+        str = d['texi2html'][0].sub (d['texi2html'][1], str)
+    for c in html_codes:
+        str = str.replace (c[0], c[1])
+    return str
+
+link_re = re.compile (r'<link rel="(up|prev|next)" (.*?) title="([^"]*?)">')
+
+def link_gettext (m):
+ return '<link rel="' + m.group (1) + '" ' + m.group (2) \
+ + ' title="' + _ (m.group (3)) + '">'
+
+makeinfo_title_re = re.compile (r'<title>([^<]*?) - ([^<]*?)</title>')
+
+def makeinfo_title_gettext (m):
+ return '<title>' + _ (m.group (1)) + ' - ' + m.group (2) + '</title>'
+
+texi2html_title_re = re.compile (r'<title>(.+): ([A-Z\d.]+ |)(.+?)</title>')
+
+def texi2html_title_gettext (m):
+ return '<title>' + _ (m.group (1)) + double_punct_char_separator + ': ' \
+ + m.group (2) + _ (m.group (3)) + '</title>'
+
+a_href_re = re.compile ('(?s)<a (?P<attributes>[^>]*?href="[\\w.#-_]+"[^>]*?>)(?P<code><code>)?\
+(?P<appendix>Appendix )?(?P<leading>[A-Z0-9.]+ | (?:<){1,2} | [^>:]+?: | |)\
+(?P<name>(?:<samp><span class="command">|</?code>|</span>|[^>])+?)(?P<end_code>(?(code)</code>|))\
+(?P<trailing> (?:>){1,2} | |)</a>:?')
+
+def a_href_gettext (m):
+ s = ''
+ if m.group(0)[-1] == ':':
+ s = double_punct_char_separator + ':'
+ t = ''
+ if m.group ('appendix'):
+ t = _ (m.group ('appendix'))
+ return '<a ' + m.group ('attributes') + (m.group ('code') or '') + \
+ t + m.group ('leading') + _ (m.group ('name')) + \
+ m.group ('end_code') + m.group ('trailing') + '</a>' + s
+
+h_re = re.compile (r'<h(\d)( class="\w+"|)>\s*(Appendix |)([A-Z\d.]+ |)(.+?)\s*</h\1>')
+
+def h_gettext (m):
+ if m.group (3):
+ s = _ (m.group (3))
+ else:
+ s= ''
+ return '<h' + m.group (1) + m.group (2) + '>' + s +\
+ m.group (4) + _ (m.group (5)) + '</h' + m.group (1) + '>'
+
+# Rewrite each HTML file: translate rel links, <title> elements,
+# cross-reference anchors, headings and the Next/Previous/Up
+# navigation labels, then apply the per-language post-filter.
+# Output goes to outdir (default '.': overwrite in place).
+for filename in files:
+    f = open (filename, 'r')
+    page = f.read ()
+    f.close ()
+    page = link_re.sub (link_gettext, page)
+    page = makeinfo_title_re.sub (makeinfo_title_gettext, page)
+    page = texi2html_title_re.sub (texi2html_title_gettext, page)
+    page = a_href_re.sub (a_href_gettext, page)
+    page = h_re.sub (h_gettext, page)
+    for w in ('Next:', 'Previous:', 'Up:'):
+        page = page.replace (w, _ (w))
+    page = langdefs.LANGDICT[lang].html_filter (page)
+    f = open (os.path.join (outdir, filename), 'w')
+    f.write (page)
+    f.close ()
--- /dev/null
+#!@BASH@
+
+name=install-info-html
+version=1.0
+
+all=
+index_dir=.
+
+#
+# debugging
+#
+debug_echo=:
+
+
+#
+# print usage
+#
+help ()
+{
+ cat << EOF
+$name $version
+Install HTML info document.
+
+Usage: $name [OPTIONS]... [DOCUMENT-DIR]...
+
+Options:
+ -a, --all assume all subdirectories of index to be DOCUMENT-DIRs
+ -d, --dir=DIR set index directory to DIR (default=.)
+ -D, --debug print debugging info
+ -h, --help show this help text
+ -v, --version show version
+EOF
+}
+
+
+cleanup ()
+{
+ $debug_echo "cleaning ($?)..."
+}
+
+trap cleanup 0 9 15
+
+#
+# Find command line options and switches
+#
+
+# "x:" x takes argument
+#
+options="adhvW:"
+#
+# ugh, "\-" is a hack to support long options
+# must be in double quotes for bash-2.0
+
+while getopts "\-:$options" O
+do
+ $debug_echo "O: \`$O'"
+ $debug_echo "arg: \`$OPTARG'"
+ case $O in
+ a)
+ all=yes
+ ;;
+ D)
+ [ "$debug_echo" = "echo" ] && set -x
+ debug_echo=echo
+ ;;
+ h)
+ help;
+ exit 0
+ ;;
+ v)
+ echo $name $version
+ exit 0
+ ;;
+ d)
+ index_dir=$OPTARG
+ ;;
+ # a long option!
+ -)
+ case "$OPTARG" in
+ a*|-a*)
+ all=yes
+ ;;
+ de*|-de*)
+ [ "$debug_echo" = "echo" ] && set -x
+ debug_echo=echo
+ ;;
+ h*|-h*)
+ help;
+ exit 0
+ ;;
+ di*|-di*)
+ index_dir="`expr \"$OPTARG\" ':' '[^=]*=\(.*\)'`"
+ ;;
+ version|-version)
+ echo $name $version
+ exit 0
+ ;;
+ *|-*)
+ echo "$0: invalid option -- \"$OPTARG\""
+ help;
+ exit -1
+ ;;
+ esac
+ esac
+done
+shift `expr $OPTIND - 1`
+
+#
+# Input file name
+#
+if [ -z "$all" -a -z "$1" ]; then
+ help
+ echo "$name: No HTML documents given"
+ exit 2
+fi
+
+if [ -n "$all" -a -n "$1" ]; then
+ echo "$name: --all specified, ignoring DIRECTORY-DIRs"
+fi
+
+if [ -n "$all" ]; then
+ document_dirs=`/bin/ls -d1 $index_dir`
+else
+ document_dirs=$*
+fi
+
+index_file=$index_dir/index.html
+rm -f $index_file
+echo -n "$name: Writing index: $index_file..."
+
+# head
+cat >> $index_file <<EOF
+<html>
+<title>Info documentation index</title>
+<body>
+<h1>Info documentation index</h1>
+<p>
+This is the directory file \`index.html' a.k.a. \`DIR', which contains the
+topmost node of the HTML Info hierarchy.
+</p>
+<ul>
+EOF
+
+#list
+for i in $document_dirs; do
+ cat <<EOF
+<li> <a href="$i/index.html">$i</a> (<a href="$i.html">$i as one big page</a>)</li>
+EOF
+done >> $index_file
+
+# foot
+cat >> $index_file <<EOF
+</ul>
+</body>
+</html>
+EOF
+echo
--- /dev/null
+#!@PYTHON@
+
+# Created 01 September 2003 by Heikki Junes.
+# Rewritten by John Mandereau
+
+# Generates lilypond-words.el for (X)Emacs and lilypond-words[.vim] for Vim.
+
+import re
+import sys
+import os
+import getopt
+
+keywords = []
+reserved_words = []
+note_names = []
+
+# keywords not otherwise found
+keywords += ['include', 'maininput', 'version']
+
+# the main keywords
+s = open ('lily/lily-lexer.cc', 'r').read ()
+keywords += [w for w in re.findall (r"\s*{\"(.+)\",\s*.*},\s*\n", s)]
+
+s = open ('scm/markup.scm', 'r').read ()
+keywords += [w for w in re.findall (r"(?m)^\s*\(cons\s*([a-z-]+)-markup", s)]
+
+# identifiers and keywords
+for name in ['ly/chord-modifiers-init.ly',
+ 'ly/dynamic-scripts-init.ly',
+ 'ly/engraver-init.ly',
+ 'ly/grace-init.ly',
+ 'ly/gregorian.ly',
+ 'ly/music-functions-init.ly',
+ 'ly/performer-init.ly',
+ 'ly/property-init.ly',
+ 'ly/scale-definitions-init.ly',
+ 'ly/script-init.ly',
+ 'ly/spanners-init.ly',
+ 'ly/declarations-init.ly',
+ 'ly/params-init.ly']:
+ s = open (name, 'r').read ()
+ keywords += [w for w in re.findall (r"(?m)^\s*\"?([a-zA-Z]+)\"?\s*=", s)]
+
+# note names
+for name in ['ly/catalan.ly',
+ 'ly/deutsch.ly',
+ 'ly/drumpitch-init.ly',
+ 'ly/english.ly',
+ 'ly/espanol.ly',
+ 'ly/italiano.ly',
+ 'ly/nederlands.ly',
+ 'ly/norsk.ly',
+ 'ly/portugues.ly',
+ 'ly/suomi.ly',
+ 'ly/svenska.ly',
+ 'ly/vlaams.ly']:
+ s = open (name, 'r').read ()
+ note_names += [n for n in re.findall (r"(?m)^\s*\(([a-z]+)[^l]+ly:make-pitch", s)]
+
+# reserved words
+for name in ['ly/engraver-init.ly',
+ 'ly/performer-init.ly']:
+ s = open (name, 'r').read ()
+ for pattern in [r"(?m)^\s*.consists\s+\"([a-zA-Z_]+)\"",
+ r"[\\]name\s+[\"]?([a-zA-Z_]+)[\"]?",
+ r"\s+([a-zA-Z_]+)\s*\\(?:set|override)"]:
+ reserved_words += [w for w in re.findall (pattern, s)]
+
+keywords = list (set (keywords))
+keywords.sort (reverse=True)
+
+reserved_words = list (set (reserved_words))
+reserved_words.sort (reverse=True)
+
+note_names = list (set (note_names))
+note_names.sort (reverse=True)
+
+
+# output
+outdir = ''
+out_words = False
+out_el = False
+out_vim = False
+
+options = getopt.getopt (sys.argv[1:],
+ '', ['words', 'el', 'vim', 'dir='])[0]
+
+for (o, a) in options:
+ if o == '--words':
+ out_words = True
+ elif o == '--el':
+ out_el = True
+ elif o == '--vim':
+ out_vim = True
+ elif o == '--dir':
+ outdir = a
+
+if out_words or out_el:
+ outstring = ''.join (['\\\\' + w + '\n' for w in keywords])
+ outstring += ''.join ([w + '\n' for w in reserved_words])
+ outstring += ''.join ([w + '\n' for w in note_names])
+
+if out_words:
+ f = open (os.path.join (outdir, 'lilypond-words'), 'w')
+ f.write (outstring)
+
+if out_el:
+ f = open (os.path.join (outdir, 'lilypond-words.el'), 'w')
+ f.write (outstring)
+
+ # the menu in lilypond-mode.el
+ # for easier typing of this list, replace '/' with '\' below
+ # when writing to file
+ elisp_menu = ['/( - _ /) -',
+ '/[ - _ /] -',
+ '< - _ > -',
+ '<< - _ >> -',
+ '///( - _ ///) -',
+ '///[ - _ ///] -',
+ '///< - _ ///! -',
+ '///> - _ ///! -',
+ '//center - / << _ >> -',
+ '//column - / << _ >> -',
+ '//context/ Staff/ = - % { _ } -',
+ '//context/ Voice/ = - % { _ } -',
+ '//markup - { _ } -',
+ '//notes - { _ } -',
+ '//relative - % { _ } -',
+ '//score - { //n /? //simultaneous { //n _ //n } /! //n //paper { } //n /? //midi { } //n /! } //n -',
+ '//simultaneous - { _ } -',
+ '//sustainDown - _ //sustainUp -',
+ '//times - % { _ } -',
+ '//transpose - % { _ } -',
+ '']
+ f.write ('\n'.join ([line.replace ('/', '\\') for line in elisp_menu]))
+
+if out_vim:
+ f = open (os.path.join (outdir, 'lilypond-words.vim'), 'w')
+ f.write ('syn match lilyKeyword \"[-_^]\\?\\\\\\(')
+ f.write (''.join ([w + '\\|' for w in keywords]))
+ f.write ('n\\)\\(\\A\\|\\n\\)\"me=e-1\n')
+
+ f.write ('syn match lilyReservedWord \"\\(\\A\\|\\n\\)\\(')
+ f.write (''.join ([w + '\\|' for w in reserved_words]))
+ f.write ('Score\\)\\(\\A\\|\\n\\)\"ms=s+1,me=e-1\n')
+
+ f.write ('syn match lilyNote \"\\<\\(\\(\\(')
+ f.write (''.join ([w + '\\|' for w in note_names]))
+ f.write ('a\\)\\([,\']\\)\\{,4}\\([?!]\\)\\?\\)\\|s\\|r\\)\\(\\(128\\|64\\|32\\|16\\|8\\|4\\|2\\|1\\|\\\\breve\\|\\\\longa\\|\\\\maxima\\)[.]\\{,8}\\)\\?\\(\\A\\|\\n\\)\"me=e-1\n')
--- /dev/null
+#!@PYTHON@
+
+
+'''
+TODO:
+
+ * Add @nodes, split at sections?
+
+'''
+
+
+import sys
+import os
+import getopt
+import re
+
+program_name = 'lys-to-tely'
+
+include_snippets = '@lysnippets'
+fragment_options = 'printfilename,texidoc'
+help_text = r"""Usage: %(program_name)s [OPTIONS]... LY-FILE...
+Construct tely doc from LY-FILEs.
+
+Options:
+ -h, --help print this help
+ -f, --fragment-options=OPTIONS use OPTIONS as lilypond-book fragment
+ options
+ -o, --output=NAME write tely doc to NAME
+ -t, --title=TITLE set tely doc title TITLE
+ --template=TEMPLATE use TEMPLATE as Texinfo template file,
+ instead of standard template; TEMPLATE should contain a command
+ '%(include_snippets)s' to tell where to insert LY-FILEs. When this
+ option is used, NAME and TITLE are ignored.
+"""
+
+def help (text):
+ sys.stdout.write ( text)
+ sys.exit (0)
+
+(options, files) = getopt.getopt (sys.argv[1:], 'f:hn:t:',
+ ['fragment-options=', 'help', 'name=', 'title=', 'template='])
+
+name = "ly-doc"
+title = "Ly Doc"
+template = '''\input texinfo
+@setfilename %%(name)s.info
+@settitle %%(title)s
+
+@documentencoding utf-8
+@iftex
+@afourpaper
+@end iftex
+
+@finalout @c we do not want black boxes.
+
+@c fool ls-latex
+@ignore
+@author Han-Wen Nienhuys and Jan Nieuwenhuizen
+@title %%(title)s
+@end ignore
+
+@node Top, , , (dir)
+@top %%(title)s
+
+%s
+
+@bye
+''' % include_snippets
+
+for opt in options:
+ o = opt[0]
+ a = opt[1]
+ if o == '-h' or o == '--help':
+ # We can't use vars () inside a function, as that only contains all
+ # local variables and none of the global variables! Thus we have to
+ # generate the help text here and pass it to the function...
+ help (help_text % vars ())
+ elif o == '-n' or o == '--name':
+ name = a
+ elif o == '-t' or o == '--title':
+ title = a
+ elif o == '-f' or o == '--fragment-options':
+ fragment_options = a
+ elif o == '--template':
+ template = open (a, 'r').read ()
+ else:
+ raise Exception ('unknown option: ' + o)
+
+texi_file_re = re.compile ('.*\.i?te(ly|xi)$')
+
+def name2line (n):
+    # Map one input file name to the Texinfo fragment that pulls it in.
+    if texi_file_re.match (n):
+        # We have a texi include file, simply include it:
+        s = r"@include %s" % os.path.basename (n)
+    else:
+        # Assume it's a lilypond file -> create image etc.
+        # The HTML anchor lets other pages link to this snippet directly.
+        s = r"""
+@ifhtml
+@html
+<a name="%s"></a>
+@end html
+@end ifhtml
+
+@lilypondfile[%s]{%s}
+""" % (os.path.basename (n), fragment_options, n)
+    return s
+
+if files:
+ dir = os.path.dirname (name) or "."
+# don't strip .tely extension, input/lsr uses .itely
+ name = os.path.basename (name)
+ template = template % vars ()
+
+ s = "\n".join (map (name2line, files))
+ s = template.replace (include_snippets, s, 1)
+ f = "%s/%s" % (dir, name)
+ sys.stderr.write ("%s: writing %s..." % (program_name, f))
+ h = open (f, "w")
+ h.write (s)
+ h.close ()
+ sys.stderr.write ('\n')
+else:
+ # not Unix philosophy, but hey, at least we notice when
+ # we don't distribute any .ly files.
+ sys.stderr.write ("No files specified. Doing nothing")
--- /dev/null
+#!@PYTHON@
+# mass-link.py
+
+# USAGE: mass-link.py [--prepend-suffix SUFFIX] symbolic | hard SOURCEDIR DESTDIR FILES
+#
+# create hard or symbolic links to SOURCEDIR/FILES in DESTDIR
+#
+# If --prepend-suffix is specified, link to foo.bar will be called fooSUFFIX.bar.
+# Shell wildcards expansion is performed on FILES.
+
+import sys
+import os
+import glob
+import getopt
+
+print "mass-link.py"
+
+optlist, args = getopt.getopt (sys.argv[1:], '', ['prepend-suffix='])
+link_type, source_dir, dest_dir = args[0:3]
+files = args[3:]
+
+source_dir = os.path.normpath (source_dir)
+dest_dir = os.path.normpath (dest_dir)
+
+prepended_suffix = ''
+for x in optlist:
+ if x[0] == '--prepend-suffix':
+ prepended_suffix = x[1]
+
+if prepended_suffix:
+ def insert_suffix (p):
+ l = p.split ('.')
+ if len (l) >= 2:
+ l[-2] += prepended_suffix
+ return '.'.join (l)
+ return p + prepended_suffix
+else:
+ insert_suffix = lambda p: p
+
+if link_type == 'symbolic':
+ link = os.symlink
+elif link_type == 'hard':
+ link = os.link
+else:
+ sys.stderr.write(sys.argv[0] + ': ' + link_type + ": wrong argument, expected 'symbolic' or 'hard'\n")
+ sys.exit (1)
+
+sourcefiles = []
+for pattern in files:
+ sourcefiles += (glob.glob (os.path.join (source_dir, pattern)))
+
+def relative_path (f):
+ if source_dir == '.':
+ return f
+ return f[len (source_dir) + 1:]
+
+destfiles = [os.path.join (dest_dir, insert_suffix (relative_path (f))) for f in sourcefiles]
+
+destdirs = set ([os.path.dirname (dest) for dest in destfiles])
+[os.makedirs (d) for d in destdirs if not os.path.exists (d)]
+
+def force_link (src,dest):
+ if os.path.exists (dest):
+ os.system ('rm -f ' + dest)
+ link (src, dest)
+
+map (force_link, sourcefiles, destfiles)
--- /dev/null
+#!@PYTHON@
+
+# mf-to-table.py -- convert spacing info in MF logs .
+#
+# source file of the GNU LilyPond music typesetter
+#
+# (c) 1997--2008 Han-Wen Nienhuys <hanwen@cs.uu.nl>
+
+import os
+import sys
+import getopt
+import re
+import time
+
+def read_log_file (fn):
+    # Parse a MetaFont log file.  Returns (autolines, deps):
+    #   autolines -- the @{...@} metric records emitted by the MF code;
+    #   deps      -- the .mf source files that were read (dependencies).
+    str = open (fn).read ()
+    # MF wraps log output at a fixed column; join lines and squeeze
+    # whitespace so the regexps below can match across wrap points.
+    str = re.sub ('\n', '', str)
+    str = re.sub ('[\t ]+', ' ', str)
+
+    deps = []
+    autolines = []
+    # Default arguments bind the accumulator lists into the callbacks.
+    def include_func (match, d = deps):
+        d.append (match.group (1))
+        return ''
+
+    def auto_func (match, a = autolines):
+        a.append (match.group (1))
+        return ''
+
+    # "(path/to/file.mf" marks an opened input file in the log.
+    str = re.sub ('\\(([/a-z.A-Z_0-9-]+\\.mf)', include_func, str)
+    str = re.sub ('@{(.*?)@}', auto_func, str)
+
+    return (autolines, deps)
+
+
+class Char_metric:
+ def __init__ (self):
+ pass
+
+font_family = 'feta'
+
+def parse_logfile (fn):
+ autolines, deps = read_log_file (fn)
+ charmetrics = []
+
+ global_info = {
+ 'filename' : os.path.splitext (os.path.basename (fn))[0]
+ }
+ group = ''
+
+ for l in autolines:
+ tags = l.split ('@:')
+ if tags[0] == 'group':
+ group = tags[1]
+ elif tags[0] == 'puorg':
+ group = ''
+ elif tags[0] == 'char':
+ name = tags[9]
+
+ if group:
+ name = group + '.' + name
+ m = {
+ 'description': tags[1],
+ 'name': name,
+ 'code': int (tags[2]),
+ 'breapth': float (tags[3]),
+ 'width': float (tags[4]),
+ 'depth': float (tags[5]),
+ 'height': float (tags[6]),
+ 'wx': float (tags[7]),
+ 'wy': float (tags[8]),
+ }
+ charmetrics.append (m)
+ elif tags[0] == 'font':
+ global font_family
+ font_family = (tags[3])
+ # To omit 'GNU' (foundry) from font name proper:
+ # name = tags[2:]
+ #urg
+ if 0: # testing
+ tags.append ('Regular')
+
+ encoding = re.sub (' ','-', tags[5])
+ tags = tags[:-1]
+ name = tags[1:]
+ global_info['design_size'] = float (tags[4])
+ global_info['FontName'] = '-'.join (name)
+ global_info['FullName'] = ' '.join (name)
+ global_info['FamilyName'] = '-'.join (name[1:-1])
+ if 1:
+ global_info['Weight'] = tags[4]
+ else: # testing
+ global_info['Weight'] = tags[-1]
+
+ global_info['FontBBox'] = '0 0 1000 1000'
+ global_info['Ascender'] = '0'
+ global_info['Descender'] = '0'
+ global_info['EncodingScheme'] = encoding
+
+ elif tags[0] == 'parameter':
+ global_info[tags[1]] = tags[2];
+
+ return (global_info, charmetrics, deps)
+
+
+
+# Render all character metrics as a Lisp alist string: one entry per
+# glyph with bbox, subfont name (the log file's basename), subfont index
+# (the character code) and attachment point.
+def character_lisp_table (global_info, charmetrics):
+
+ # Format a single glyph's metric dict as one Lisp entry.
+ def conv_char_metric (charmetric):
+ f = 1.0
+ s = """(%s .
+((bbox . (%f %f %f %f))
+(subfont . "%s")
+(subfont-index . %d)
+(attachment . (%f . %f))))
+""" %(charmetric['name'],
+ -charmetric['breapth'] * f,
+ -charmetric['depth'] * f,
+ charmetric['width'] * f,
+ charmetric['height'] * f,
+ global_info['filename'],
+ charmetric['code'],
+ charmetric['wx'],
+ charmetric['wy'])
+
+ return s
+
+ s = ''
+ for c in charmetrics:
+ s += conv_char_metric (c)
+
+ return s
+
+
+# Render selected font-wide parameters from global_info as Lisp
+# "(key . value)" pairs, one per line.  Keys absent from global_info are
+# silently skipped.
+# NOTE(review): uses dict.has_key, i.e. Python 2 only; the local name
+# 'str' also shadows the builtin (left untouched here).
+def global_lisp_table (global_info):
+ str = ''
+
+ keys = ['staffsize', 'stafflinethickness', 'staff_space',
+ 'linethickness', 'black_notehead_width', 'ledgerlinethickness',
+ 'design_size',
+ 'blot_diameter'
+ ]
+ for k in keys:
+ if global_info.has_key (k):
+ str = str + "(%s . %s)\n" % (k,global_info[k])
+
+ return str
+
+
+# Build a PostScript encoding vector definition named 'name': a 256-slot
+# array mapping each character code to its glyph name, with unused slots
+# set to /.notdef.  Each line carries the slot number as a PS comment.
+def ps_encoding (name, global_info, charmetrics):
+ encs = ['.notdef'] * 256
+ for m in charmetrics:
+ encs[m['code']] = m['name']
+
+
+ s = ('/%s [\n' % name)
+ for m in range (0, 256):
+ s += (' /%s %% %d\n' % (encs[m], m))
+ s += ('] def\n')
+ return s
+
+# Format a Make-style dependency line: "TARGETS... : DEPS...".
+# Leading './' is stripped from each target name.
+def get_deps (deps, targets):
+ s = ''
+ for t in targets:
+ t = re.sub ( '^\\./', '', t)
+ s += ('%s '% t)
+ s += (": ")
+ for d in deps:
+ s += ('%s ' % d)
+ s += ('\n')
+ return s
+
+# Print usage information and exit with status 0.
+# NOTE(review): the option list printed here mentions -l/--ly, which the
+# option-parsing loop below does not actually handle — verify intent.
+def help ():
+ sys.stdout.write(r"""Usage: mf-to-table [OPTIONS] LOGFILEs
+
+Generate feta metrics table from preparated feta log.
+
+Options:
+ -d, --dep=FILE print dependency info to FILE
+ -h, --help print this help
+ -l, --ly=FILE name output table
+ -o, --outdir=DIR prefix for dependency info
+ -p, --package=DIR specify package
+
+ """)
+ sys.exit (0)
+
+
+# --- Command-line parsing and main driver ---
+(options, files) = \
+ getopt.getopt (sys.argv[1:],
+ 'a:d:ho:p:t:',
+ ['enc=', 'outdir=', 'dep=', 'lisp=',
+ 'global-lisp=',
+ 'debug', 'help', 'package='])
+
+# Output file names, filled in from the options below.
+global_lisp_nm = ''
+char_lisp_nm = ''
+enc_nm = ''
+depfile_nm = ''
+lyfile_nm = ''
+outdir_prefix = '.'
+
+for opt in options:
+ o = opt[0]
+ a = opt[1]
+ if o == '--dep' or o == '-d':
+ depfile_nm = a
+ elif o == '--outdir' or o == '-o':
+ outdir_prefix = a
+ elif o == '--lisp':
+ char_lisp_nm = a
+ elif o == '--global-lisp':
+ global_lisp_nm = a
+ elif o == '--enc':
+ enc_nm = a
+ elif o== '--help' or o == '-h':
+ help()
+ elif o == '--debug':
+ debug_b = 1
+ else:
+ print o
+ raise getopt.error
+
+# NOTE(review): lyfile_nm is never set by any option above (no -l/--ly
+# case), so 'base' is always '' and the dependency targets below are
+# bare '.log'/'.dvi'/... names — confirm this is intended.
+base = os.path.splitext (lyfile_nm)[0]
+
+# Process each log file: pick the encoding name from the file name,
+# then write the PS encoding, the per-glyph Lisp table, the global Lisp
+# table, and (optionally) a Make dependency file.
+for filenm in files:
+ (g, m, deps) = parse_logfile (filenm)
+
+ enc_name = 'FetaEncoding'
+ if re.search ('parmesan', filenm):
+ enc_name = 'ParmesanEncoding'
+ elif re.search ('feta-brace', filenm):
+ enc_name = 'FetaBraceEncoding'
+ elif re.search ('feta-alphabet', filenm):
+ enc_name = 'FetaAlphabetEncoding';
+
+ open (enc_nm, 'w').write (ps_encoding (enc_name, g, m))
+ open (char_lisp_nm, 'w').write (character_lisp_table (g, m))
+ open (global_lisp_nm, 'w').write (global_lisp_table (g))
+ if depfile_nm:
+ open (depfile_nm, 'wb').write (get_deps (deps,
+ [base + '.log', base + '.dvi', base + '.pfa',
+ depfile_nm,
+ base + '.pfb']))
--- /dev/null
+#!@PERL@
+
+##################################################
+# Convert stylized Metafont to PostScript Type 1 #
+# By Scott Pakin <scott+mf@pakin.org> #
+##################################################
+
+########################################################################
+# mf2pt1 #
+# Copyright (C) 2008 Scott Pakin #
+# #
+# This program may be distributed and/or modified under the conditions #
+# of the LaTeX Project Public License, either version 1.3c of this #
+# license or (at your option) any later version. #
+# #
+# The latest version of this license is in: #
+# #
+# http://www.latex-project.org/lppl.txt #
+# #
+# and version 1.3c or later is part of all distributions of LaTeX #
+# version 2006/05/20 or later. #
+########################################################################
+
+our $VERSION = "2.4.4"; # mf2pt1 version number
+require 5.6.1; # I haven't tested mf2pt1 with older Perl versions
+
+use File::Basename;
+use File::Spec;
+use Getopt::Long;
+use Pod::Usage;
+use Math::Trig;
+use warnings;
+use strict;
+
+# Define some common encoding vectors.
+# Each table maps character code -> glyph name; slots without a standard
+# glyph get a placeholder name of the form _aNNN (filtered out later when
+# the encoding vector is written in output_header).
+# Adobe StandardEncoding:
+my @standardencoding =
+ ((map {"_a$_"} (0..31)),
+ qw (space exclam quotedbl numbersign dollar percent ampersand
+ quoteright parenleft parenright asterisk plus comma hyphen
+ period slash zero one two three four five six seven eight
+ nine colon semicolon less equal greater question at A B C D E
+ F G H I J K L M N O P Q R S T U V W X Y Z bracketleft
+ backslash bracketright asciicircum underscore quoteleft a b c
+ d e f g h i j k l m n o p q r s t u v w x y z braceleft bar
+ braceright asciitilde),
+ (map {"_a$_"} (127..160)),
+ qw (exclamdown cent sterling fraction yen florin section currency
+ quotesingle quotedblleft guillemotleft guilsinglleft
+ guilsinglright fi fl _a176 endash dagger daggerdbl
+ periodcentered _a181 paragraph bullet quotesinglbase
+ quotedblbase quotedblright guillemotright ellipsis
+ perthousand _a190 questiondown _a192 grave acute circumflex
+ tilde macron breve dotaccent dieresis _a201 ring cedilla
+ _a204 hungarumlaut ogonek caron emdash),
+ (map {"_a$_"} (209..224)),
+ qw (AE _a226 ordfeminine _a228 _a229 _a230 _a231 Lslash Oslash OE
+ ordmasculine _a236 _a237 _a238 _a239 _a240 ae _a242 _a243
+ _a244 dotlessi _a246 _a247 lslash oslash oe germandbls _a252
+ _a253 _a254 _a255));
+# ISO Latin-1 encoding:
+my @isolatin1encoding =
+ ((map {"_a$_"} (0..31)),
+ qw (space exclam quotedbl numbersign dollar percent ampersand
+ quoteright parenleft parenright asterisk plus comma minus
+ period slash zero one two three four five six seven eight
+ nine colon semicolon less equal greater question at A B C D E
+ F G H I J K L M N O P Q R S T U V W X Y Z bracketleft
+ backslash bracketright asciicircum underscore quoteleft a b c
+ d e f g h i j k l m n o p q r s t u v w x y z braceleft bar
+ braceright asciitilde),
+ (map {"_a$_"} (128..143)),
+ qw (dotlessi grave acute circumflex tilde macron breve dotaccent
+ dieresis _a153 ring cedilla _a156 hungarumlaut ogonek
+ caron space exclamdown cent sterling currency yen brokenbar
+ section dieresis copyright ordfeminine guillemotleft
+ logicalnot hyphen registered macron degree plusminus
+ twosuperior threesuperior acute mu paragraph periodcentered
+ cedilla onesuperior ordmasculine guillemotright onequarter
+ onehalf threequarters questiondown Agrave Aacute Acircumflex
+ Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex
+ Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde
+ Ograve Oacute Ocircumflex Otilde Odieresis multiply Oslash
+ Ugrave Uacute Ucircumflex Udieresis Yacute Thorn germandbls
+ agrave aacute acircumflex atilde adieresis aring ae ccedilla
+ egrave eacute ecircumflex edieresis igrave iacute icircumflex
+ idieresis eth ntilde ograve oacute ocircumflex otilde
+ odieresis divide oslash ugrave uacute ucircumflex udieresis
+ yacute thorn ydieresis));
+# TeX OT1 (7-bit) encoding:
+my @ot1encoding =
+ qw (Gamma Delta Theta Lambda Xi Pi Sigma Upsilon Phi
+ Psi Omega ff fi fl ffi ffl dotlessi dotlessj grave acute caron
+ breve macron ring cedilla germandbls ae oe oslash AE OE Oslash
+ suppress exclam quotedblright numbersign dollar percent
+ ampersand quoteright parenleft parenright asterisk plus comma
+ hyphen period slash zero one two three four five six seven
+ eight nine colon semicolon exclamdown equal questiondown
+ question at A B C D E F G H I J K L M N O P Q R S T U V W X Y
+ Z bracketleft quotedblleft bracketright circumflex dotaccent
+ quoteleft a b c d e f g h i j k l m n o p q r s t u v w x y z
+ endash emdash hungarumlaut tilde dieresis);
+# TeX T1 (Cork) encoding:
+my @t1encoding =
+ qw (grave acute circumflex tilde dieresis hungarumlaut ring caron
+ breve macron dotaccent cedilla ogonek quotesinglbase
+ guilsinglleft guilsinglright quotedblleft quotedblright
+ quotedblbase guillemotleft guillemotright endash emdash cwm
+ perthousand dotlessi dotlessj ff fi fl ffi ffl space exclam
+ quotedbl numbersign dollar percent ampersand quoteright
+ parenleft parenright asterisk plus comma hyphen period slash
+ zero one two three four five six seven eight nine colon
+ semicolon less equal greater question at A B C D E F G H I J K L
+ M N O P Q R S T U V W X Y Z bracketleft backslash bracketright
+ asciicircum underscore quoteleft a b c d e f g h i j k l m n o p
+ q r s t u v w x y z braceleft bar braceright asciitilde
+ sfthyphen Abreve Aogonek Cacute Ccaron Dcaron Ecaron Eogonek
+ Gbreve Lacute Lcaron Lslash Nacute Ncaron Eng Ohungarumlaut
+ Racute Rcaron Sacute Scaron Scedilla Tcaron Tcedilla
+ Uhungarumlaut Uring Ydieresis Zacute Zcaron Zdotaccent IJ
+ Idotaccent dcroat section abreve aogonek cacute ccaron dcaron
+ ecaron eogonek gbreve lacute lcaron lslash nacute ncaron eng
+ ohungarumlaut racute rcaron sacute scaron scedilla tcaron
+ tcedilla uhungarumlaut uring ydieresis zacute zcaron zdotaccent
+ ij exclamdown questiondown sterling Agrave Aacute Acircumflex
+ Atilde Adieresis Aring AE Ccedilla Egrave Eacute Ecircumflex
+ Edieresis Igrave Iacute Icircumflex Idieresis Eth Ntilde Ograve
+ Oacute Ocircumflex Otilde Odieresis OE Oslash Ugrave Uacute
+ Ucircumflex Udieresis Yacute Thorn SS agrave aacute acircumflex
+ atilde adieresis aring ae ccedilla egrave eacute ecircumflex
+ edieresis igrave iacute icircumflex idieresis eth ntilde ograve
+ oacute ocircumflex otilde odieresis oe oslash ugrave uacute
+ ucircumflex udieresis yacute thorn germandbls);
+
+# Define font parameters that the user can override.
+# These start undefined; assign_default fills them in later from
+# command-line options, values parsed out of the MetaPost output, or
+# built-in fall-backs (see the main program below).
+my $fontversion;
+my $creationdate;
+my $comment;
+my $familyname;
+my $weight;
+my $fullname;
+my $fixedpitch;
+my $italicangle;
+my $underlinepos;
+my $underlinethick;
+my $fontname;
+my $uniqueID;
+my $designsize;
+my ($mffile, $pt1file, $pfbfile, $ffscript);
+my $encoding;
+# $rounding: precision used by prec()/frac_approx(); $bpppix is passed
+# to MetaPost on its command line.
+my $rounding;
+my $bpppix;
+
+# Define all of our other global variables.
+my $progname = basename $0, ".pl";
+my $mag;
+my @fontbbox;
+my @charbbox;
+my @charwd;
+my @glyphname;
+my @charfiles;
+my $filebase;
+my $filedir;
+my $filenoext;
+my $versionmsg = "mf2pt1 version $VERSION
+
+Copyright (C) 2008 Scott Pakin
+
+This program may be distributed and/or modified under the conditions
+of the LaTeX Project Public License, either version 1.3c of this
+license or (at your option) any later version.
+
+The latest version of this license is in:
+
+ http://www.latex-project.org/lppl.txt
+
+and version 1.3c or later is part of all distributions of LaTeX
+version 2006/05/20 or later.
+";
+
+
+######################################################################
+
+# The routines to compute the fractional approximation of a real number
+# are heavily based on code posted by Ben Tilly
+# <http://www.perlmonks.org/?node_id=26179> on Nov 16th, 2000, to the
+# PerlMonks list. See <http://www.perlmonks.org/index.pl?node_id=41961>.
+
+
+# Takes numerator/denominator pairs.
+# Returns a PS fraction string representation (with a trailing space).
+# Takes numerator/denominator pairs.
+# Returns a PS fraction string representation (with a trailing space).
+# Denominator 1 is omitted; otherwise "N D div " is emitted so the
+# PostScript interpreter performs the division.
+sub frac_string (@)
+{
+ my $res = "";
+
+ while (@_) {
+ my $n = shift;
+ my $d = shift;
+ $res .= $n . " ";
+ $res .= $d . " div " if $d > 1;
+ }
+
+ return $res;
+}
+
+
+# Takes a number.
+# Returns a numerator and denominator with the smallest denominator
+# so that the difference of the resulting fraction to the number is
+# smaller or equal to $rounding.
+# Takes a number.
+# Returns a numerator and denominator with the smallest denominator
+# so that the difference of the resulting fraction to the number is
+# smaller or equal to $rounding.  Iterates the continued-fraction
+# approximations produced by ret_frac_iter until one is close enough.
+sub frac_approx ($)
+{
+ my $num = shift;
+ my $f = ret_frac_iter ($num);
+
+ while (1) {
+ my ($n, $m) = $f->();
+ my $approx = $n / $m;
+ my $delta = abs ($num - $approx);
+ return ($n, $m) if ($delta <= $rounding);
+ }
+}
+
+
+# Takes a number, returns the best integer approximation and (in list
+# context) the error.
+# Takes a number, returns the best integer approximation and (in list
+# context) the error (number minus approximation).
+sub best_int ($)
+{
+ my $x = shift;
+ my $approx = sprintf '%.0f', $x;
+ if (wantarray) {
+ return ($approx, $x - $approx);
+ }
+ else {
+ return $approx;
+ }
+}
+
+
+# Takes a numerator and denominator, in scalar context returns
+# the best fraction describing them, in list the numerator and
+# denominator.
+# Takes a numerator and denominator, in scalar context returns
+# the best fraction describing them, in list the numerator and
+# denominator.  Reduces by the GCD and normalizes so the denominator
+# is non-negative.
+sub frac_standard ($$)
+{
+ my $n = best_int(shift);
+ my $m = best_int(shift);
+ my $k = gcd($n, $m);
+ $n /= $k;
+ $m /= $k;
+ if ($m < 0) {
+ $n *= -1;
+ $m *= -1;
+ }
+ if (wantarray) {
+ return ($n, $m);
+ }
+ else {
+ return "$n/$m";
+ }
+}
+
+
+# Euclidean algorithm for calculating a GCD.
+# Takes two integers, returns the greatest common divisor.
+# Euclidean algorithm for calculating a GCD.
+# Takes two integers, returns the greatest common divisor.
+sub gcd ($$)
+{
+ my ($n, $m) = @_;
+ while ($m) {
+ my $k = $n % $m;
+ ($n, $m) = ($m, $k);
+ }
+ return $n;
+}
+
+
+# Takes a list of terms in a continued fraction, and converts it
+# into a fraction.
+# Takes a list of terms in a continued fraction, and converts it
+# into a fraction (folding from the last term backwards via pop).
+sub ints_to_frac (@)
+{
+ my ($n, $m) = (0, 1); # Start with 0
+ while (@_) {
+ my $k = pop;
+ if ($n) {
+ # Want frac for $k + 1/($n/$m)
+ ($n, $m) = frac_standard($k*$n + $m, $n);
+ }
+ else {
+ # Want $k
+ ($n, $m) = frac_standard($k, 1);
+ }
+ }
+ return frac_standard($n, $m);
+}
+
+
+# Takes a number, returns an anon sub which iterates through a set of
+# fractional approximations that converges very quickly to the number.
+# Takes a number, returns an anon sub which iterates through a set of
+# fractional approximations that converges very quickly to the number.
+# The closure accumulates continued-fraction terms in @ints; each call
+# appends one more term and returns the resulting fraction.
+sub ret_frac_iter ($)
+{
+ my $x = shift;
+ my $term_iter = ret_next_term_iter($x);
+ my @ints;
+ return sub {
+ push @ints, $term_iter->();
+ return ints_to_frac(@ints);
+ }
+}
+
+
+# Terms of a continued fraction converging on that number.
+# Terms of a continued fraction converging on that number.
+# Returns a closure; each call yields the next term, updating the
+# captured remainder $x (reciprocal of the previous error).
+sub ret_next_term_iter ($)
+{
+ my $x = shift;
+ return sub {
+ (my $n, $x) = best_int($x);
+ if (0 != $x) {
+ $x = 1/$x;
+ }
+ return $n;
+ }
+}
+
+######################################################################
+
+# Round a number to the nearest integer.
+# Round a number to the nearest integer (half away from zero; the
+# <=> term supplies the sign of the 0.5 offset).
+sub round ($)
+{
+ return int($_[0] + 0.5*($_[0] <=> 0));
+}
+
+
+# Round a number to a given precision.
+# Round a number to a given precision, namely the global $rounding.
+sub prec ($)
+{
+ return round ($_[0] / $rounding) * $rounding;
+}
+
+
+# Set a variable's value to the first defined value in the given list.
+# If the variable was not previously defined and no value in the list
+# is defined, do nothing.
+# Set a variable's value to the first defined value in the given list.
+# If the variable was not previously defined and no value in the list
+# is defined, do nothing.  The (\$@) prototype passes the first argument
+# by reference; the sentinel string "UNSPECIFIED" is treated as unset.
+sub assign_default (\$@)
+{
+ my $varptr = shift; # Pointer to variable to define
+ return if defined $$varptr && $$varptr ne "UNSPECIFIED";
+ foreach my $val (@_) {
+ next if !defined $val;
+ $$varptr = $val;
+ return;
+ }
+}
+
+
+# Print and execute a shell command. An environment variable with the
+# same name as the command overrides the command name. Return 1 on
+# success, 0 on failure. Optionally abort if the command fails, based
+# on the first argument to execute_command.
+# Print and execute a shell command. An environment variable with the
+# same name (uppercased) as the command overrides the command name.
+# Return 1 on success, 0 on failure. Optionally abort if the command
+# fails, based on the first argument to execute_command.
+# NOTE(review): $! in the die message is only meaningful when system()
+# itself failed to launch; a child that merely exits nonzero may leave
+# it empty — confirm whether that matters here.
+sub execute_command ($@)
+{
+ my $abort_on_failure = shift;
+ my @command = @_;
+ $command[0] = $ENV{uc $command[0]} || $command[0];
+ my $prettyargs = join (" ", map {/[\\ ]/ ? "'$_'" : $_} @command);
+ print "Invoking \"$prettyargs\"...\n";
+ my $result = system @command;
+ die "${progname}: \"$prettyargs\" failed ($!)\n" if $result && $abort_on_failure;
+ return !$result;
+}
+
+
+# Output the font header.
+# Output the font header to OUTFILE: Type 1 FontInfo boilerplate, the
+# /Encoding vector, and the opening of the eexec/Private section.
+# Relies on the global font parameters ($fontname, $fontversion, ...)
+# having been assigned before it is called.
+sub output_header ()
+{
+ # Show the initial boilerplate.
+ print OUTFILE <<"ENDHEADER";
+%!FontType1-1.0: $fontname $fontversion
+%%CreationDate: $creationdate
+% Font converted to Type 1 by mf2pt1, written by Scott Pakin.
+11 dict begin
+/FontInfo 11 dict dup begin
+/version ($fontversion) readonly def
+/Notice ($comment) readonly def
+/FullName ($fullname) readonly def
+/FamilyName ($familyname) readonly def
+/Weight ($weight) readonly def
+/ItalicAngle $italicangle def
+/isFixedPitch $fixedpitch def
+/UnderlinePosition $underlinepos def
+/UnderlineThickness $underlinethick def
+end readonly def
+/FontName /$fontname def
+ENDHEADER
+
+ # If we're not using an encoding that PostScript knows about, then
+ # create an encoding vector.  (Reference comparison: $encoding is a
+ # reference to one of the encoding arrays defined above.)
+ if ($encoding==\@standardencoding) {
+ print OUTFILE "/Encoding StandardEncoding def\n";
+ }
+ else {
+ print OUTFILE "/Encoding 256 array\n";
+ print OUTFILE "0 1 255 {1 index exch /.notdef put} for\n";
+ # Skip empty slots and the _aNNN placeholder names.
+ foreach my $charnum (0 .. $#{$encoding}) {
+ if ($encoding->[$charnum] && $encoding->[$charnum]!~/^_a\d+$/) {
+ print OUTFILE "dup $charnum /$encoding->[$charnum] put\n";
+ }
+ }
+ print OUTFILE "readonly def\n";
+ }
+
+ # Show the final boilerplate.
+ print OUTFILE <<"ENDHEADER";
+/PaintType 0 def
+/FontType 1 def
+/FontMatrix [0.001 0 0 0.001 0 0] readonly def
+/UniqueID $uniqueID def
+/FontBBox{@fontbbox}readonly def
+currentdict end
+currentfile eexec
+dup /Private 5 dict dup begin
+/RD{string currentfile exch readstring pop}executeonly def
+/ND{noaccess def}executeonly def
+/NP{noaccess put}executeonly def
+ENDHEADER
+}
+
+
+# Use MetaPost to generate one PostScript file per character. We
+# calculate the font bounding box from these characters and store them
+# in @fontbbox. If the input parameter is 1, set other font
+# parameters, too.
+# Use MetaPost to generate one PostScript file per character. We
+# calculate the font bounding box from these characters and store them
+# in @fontbbox. If the input parameter is 1, set other font
+# parameters, too.
+sub get_bboxes ($)
+{
+ execute_command 1, ("mpost", "-mem=mf2pt1", "-progname=mpost",
+ "\\mode:=localfont; mag:=$mag; bpppix $bpppix; input $mffile");
+ # Collect the per-character output files ("<base>.<charnum>"),
+ # sorted numerically by character number.
+ opendir (CURDIR, ".") || die "${progname}: $! ($filedir)\n";
+ @charfiles = sort
+ { ($a=~ /\.(\d+)$/)[0] <=> ($b=~ /\.(\d+)$/)[0] }
+ grep /^$filebase.*\.\d+$/, readdir(CURDIR);
+ # NOTE(review): CURDIR was opened with opendir; this should arguably
+ # be closedir, not close.
+ close CURDIR;
+ @fontbbox = (1000000, 1000000, -1000000, -1000000);
+ foreach my $psfile (@charfiles) {
+ # Read the character number from the output file's extension.
+ $psfile =~ /\.(\d+)$/;
+ my $charnum = $1;
+
+ # Process in turn each line of the current PostScript file.
+ my $havebbox = 0;
+ open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n";
+ while (<PSFILE>) {
+ my @tokens = split " ";
+ if ($tokens[0] eq "%%BoundingBox:") {
+ # Store the MetaPost-produced bounding box, just in case
+ # the given font doesn't use beginchar.
+ @tokens = ("%", "MF2PT1:", "glyph_dimensions", @tokens[1..4]);
+ $havebbox--;
+ }
+ next if $#tokens<1 || $tokens[1] ne "MF2PT1:";
+
+ # Process a "special" inserted into the generated PostScript.
+ MF2PT1_CMD:
+ {
+ # glyph_dimensions llx lly urx ury -- specified glyph dimensions
+ $tokens[2] eq "glyph_dimensions" && do {
+ my @bbox = @tokens[3..6];
+ $fontbbox[0]=$bbox[0] if $bbox[0]<$fontbbox[0];
+ $fontbbox[1]=$bbox[1] if $bbox[1]<$fontbbox[1];
+ $fontbbox[2]=$bbox[2] if $bbox[2]>$fontbbox[2];
+ $fontbbox[3]=$bbox[3] if $bbox[3]>$fontbbox[3];
+ $charbbox[$charnum] = \@bbox;
+ $havebbox++;
+ last MF2PT1_CMD;
+ };
+
+ # If all we want is the bounding box, exit the loop now.
+ last MF2PT1_CMD if !$_[0];
+
+ # glyph_name name -- glyph name
+ $tokens[2] eq "glyph_name" && do {
+ $glyphname[$charnum] = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # charwd wd -- character width as in TFM
+ $tokens[2] eq "charwd" && do {
+ $charwd[$charnum] = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_identifier name -- full font name
+ $tokens[2] eq "font_identifier" && do {
+ $fullname = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_size number -- font design size (pt, not bp)
+ $tokens[2] eq "font_size" && $tokens[3] && do {
+ $designsize = $tokens[3] * 72 / 72.27;
+ last MF2PT1_CMD;
+ };
+
+ # font_slant number -- italic amount
+ $tokens[2] eq "font_slant" && do {
+ $italicangle = 0 + rad2deg (atan(-$tokens[3]));
+ last MF2PT1_CMD;
+ };
+
+ # font_coding_scheme string -- font encoding
+ $tokens[2] eq "font_coding_scheme" && do {
+ $encoding = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_version string -- font version number (xxx.yyy)
+ $tokens[2] eq "font_version" && do {
+ $fontversion = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_comment string -- font comment notice
+ $tokens[2] eq "font_comment" && do {
+ $comment = join (" ", @tokens[3..$#tokens]);
+ last MF2PT1_CMD;
+ };
+
+ # font_family string -- font family name
+ $tokens[2] eq "font_family" && do {
+ $familyname = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_weight string -- font weight (e.g., "Book" or "Heavy")
+ $tokens[2] eq "font_weight" && do {
+ $weight = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_fixed_pitch number -- fixed width font (0=false, 1=true)
+ $tokens[2] eq "font_fixed_pitch" && do {
+ $fixedpitch = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_underline_position number -- vertical underline position
+ $tokens[2] eq "font_underline_position" && do {
+ # We store $underlinepos in points and later
+ # scale it by 1000/$designsize.
+ $underlinepos = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_underline_thickness number -- thickness of underline
+ $tokens[2] eq "font_underline_thickness" && do {
+ # We store $underlinethick in points and later
+ # scale it by 1000/$designsize.
+ $underlinethick = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_name string -- font name
+ $tokens[2] eq "font_name" && do {
+ $fontname = $tokens[3];
+ last MF2PT1_CMD;
+ };
+
+ # font_unique_id number (as string) -- globally unique font ID
+ $tokens[2] eq "font_unique_id" && do {
+ $uniqueID = 0+$tokens[3];
+ last MF2PT1_CMD;
+ };
+ }
+ }
+ close PSFILE;
+ if (!$havebbox) {
+ warn "${progname}: No beginchar in character $charnum; glyph dimensions are probably incorrect\n";
+ }
+ }
+}
+
+
+# Convert ordinary, MetaPost-produced PostScript files into Type 1
+# font programs.
+# Convert ordinary, MetaPost-produced PostScript files into Type 1
+# font programs: each @charfiles entry becomes one charstring written
+# to OUTFILE, with absolute curveto/lineto/moveto coordinates turned
+# into the relative Type 1 operators (hsbw, rrcurveto, rlineto, ...).
+sub output_font_programs ()
+{
+ # Iterate over all the characters. We convert each one, line by
+ # line and token by token.
+ print "Converting PostScript graphics to Type 1 font programs...\n";
+ foreach my $psfile (@charfiles) {
+ # Initialize the font program.
+ $psfile =~ /\.(\d+)$/;
+ my $charnum = $1;
+ my $gname = $glyphname[$charnum] || $encoding->[$charnum];
+ my @fontprog;
+ push @fontprog, ("/$gname {",
+ frac_string (frac_approx ($charbbox[$charnum]->[0]),
+ frac_approx ($charwd[$charnum] * $mag))
+ . "hsbw");
+ my ($cpx, $cpy) =
+ ($charbbox[$charnum]->[0], 0); # Current point (PostScript)
+
+ # Iterate over every line in the current file.
+ open (PSFILE, "<$psfile") || die "${progname}: $! ($psfile)\n";
+ while (my $oneline=<PSFILE>) {
+ next if $oneline=~/^\%/;
+ next if $oneline=~/set/; # Fortunately, "set" never occurs on "good" lines.
+ my @arglist; # Arguments to current PostScript function
+
+ # Iterate over every token in the current line.
+ TOKENLOOP:
+ foreach my $token (split " ", $oneline) {
+ # Number: Round and push on the argument list.
+ $token =~ /^[-.\d]+$/ && do {
+ push @arglist, prec ($&);
+ next TOKENLOOP;
+ };
+
+ # curveto: Convert to vhcurveto, hvcurveto, or rrcurveto.
+ # Each control point is expressed as a fraction-approximated
+ # delta from the current point, which is advanced as we go.
+ $token eq "curveto" && do {
+ my ($dx1, $dy1) = ($arglist[0] - $cpx,
+ $arglist[1] - $cpy);
+ my ($dx1n, $dx1d) = frac_approx ($dx1);
+ my ($dy1n, $dy1d) = frac_approx ($dy1);
+ $cpx += $dx1n / $dx1d;
+ $cpy += $dy1n / $dy1d;
+
+ my ($dx2, $dy2) = ($arglist[2] - $cpx,
+ $arglist[3] - $cpy);
+ my ($dx2n, $dx2d) = frac_approx ($dx2);
+ my ($dy2n, $dy2d) = frac_approx ($dy2);
+ $cpx += $dx2n / $dx2d;
+ $cpy += $dy2n / $dy2d;
+
+ my ($dx3, $dy3) = ($arglist[4] - $cpx,
+ $arglist[5] - $cpy);
+ my ($dx3n, $dx3d) = frac_approx ($dx3);
+ my ($dy3n, $dy3d) = frac_approx ($dy3);
+ $cpx += $dx3n / $dx3d;
+ $cpy += $dy3n / $dy3d;
+
+ if (!$dx1n && !$dy3n) {
+ push @fontprog, frac_string ($dy1n, $dy1d,
+ $dx2n, $dx2d,
+ $dy2n, $dy2d,
+ $dx3n, $dx3d)
+ . "vhcurveto";
+ }
+ elsif (!$dy1n && !$dx3n) {
+ push @fontprog, frac_string ($dx1n, $dx1d,
+ $dx2n, $dx2d,
+ $dy2n, $dy2d,
+ $dy3n, $dy3d)
+ . "hvcurveto";
+ }
+ else {
+ push @fontprog, frac_string ($dx1n, $dx1d,
+ $dy1n, $dy1d,
+ $dx2n, $dx2d,
+ $dy2n, $dy2d,
+ $dx3n, $dx3d,
+ $dy3n, $dy3d)
+ . "rrcurveto";
+ }
+ next TOKENLOOP;
+ };
+
+ # lineto: Convert to vlineto, hlineto, or rlineto.
+ $token eq "lineto" && do {
+ my ($dx, $dy) = ($arglist[0] - $cpx,
+ $arglist[1] - $cpy);
+ my ($dxn, $dxd) = frac_approx ($dx);
+ my ($dyn, $dyd) = frac_approx ($dy);
+ $cpx += $dxn / $dxd;
+ $cpy += $dyn / $dyd;
+
+ if (!$dxn) {
+ push @fontprog, frac_string ($dyn, $dyd)
+ . "vlineto" if $dyn;
+ }
+ elsif (!$dyn) {
+ push @fontprog, frac_string ($dxn, $dxd)
+ . "hlineto";
+ }
+ else {
+ push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd)
+ . "rlineto";
+ }
+ next TOKENLOOP;
+ };
+
+ # moveto: Convert to vmoveto, hmoveto, or rmoveto.
+ $token eq "moveto" && do {
+ my ($dx, $dy) = ($arglist[0] - $cpx,
+ $arglist[1] - $cpy);
+ my ($dxn, $dxd) = frac_approx ($dx);
+ my ($dyn, $dyd) = frac_approx ($dy);
+ $cpx += $dxn / $dxd;
+ $cpy += $dyn / $dyd;
+
+ if (!$dxn) {
+ push @fontprog, frac_string ($dyn, $dyd)
+ . "vmoveto";
+ }
+ elsif (!$dyn) {
+ push @fontprog, frac_string ($dxn, $dxd)
+ . "hmoveto";
+ }
+ else {
+ push @fontprog, frac_string ($dxn, $dxd, $dyn, $dyd)
+ . "rmoveto";
+ }
+ next TOKENLOOP;
+ };
+
+ # closepath: Output as is.
+ $token eq "closepath" && do {
+ push @fontprog, $token;
+ next TOKENLOOP;
+ };
+ }
+ }
+ close PSFILE;
+ push @fontprog, ("endchar",
+ "} ND");
+ print OUTFILE join ("\n\t", @fontprog), "\n";
+ }
+}
+
+
+# Output the final set of code for the Type 1 font.
+# Output the final set of code for the Type 1 font: the /.notdef glyph
+# (advance width = font bbox width) and the closing boilerplate.
+sub output_trailer ()
+{
+ print OUTFILE <<"ENDTRAILER";
+/.notdef {
+ 0 @{[$fontbbox[2]-$fontbbox[0]]} hsbw
+ endchar
+ } ND
+end
+end
+readonly put
+noaccess put
+dup/FontName get exch definefont pop
+mark currentfile closefile
+cleartomark
+ENDTRAILER
+}
+
+######################################################################
+
+# Parse the command line. Asterisks in the following represents
+# commands also defined by Plain Metafont.
+my %opthash = ();
+GetOptions (\%opthash,
+ "fontversion=s", # font_version
+ "comment=s", # font_comment
+ "family=s", # font_family
+ "weight=s", # font_weight
+ "fullname=s", # font_identifier (*)
+ "fixedpitch!", # font_fixed_pitch
+ "italicangle=f", # font_slant (*)
+ "underpos=f", # font_underline_position
+ "underthick=f", # font_underline_thickness
+ "name=s", # font_name
+ "uniqueid=i", # font_unique_id
+ "designsize=f", # font_size (*)
+ "encoding=s", # font_coding_scheme (*)
+ "rounding=f",
+ "bpppix=f",
+ "ffscript=s",
+ "h|help",
+ "V|version") || pod2usage(2);
+if (defined $opthash{"h"}) {
+ pod2usage(-verbose => 1,
+ -output => \*STDOUT, # Bug workaround for Pod::Usage
+ -exitval => "NOEXIT");
+ print "Please e-mail bug reports to scott+mf\@pakin.org.\n";
+ exit 1;
+}
+# Note: --version also exits with status 1.
+do {print $versionmsg; exit 1} if defined $opthash{"V"};
+pod2usage(2) if $#ARGV != 0;
+
+# Extract the filename from the command line.
+$mffile = $ARGV[0];
+my @fileparts = fileparse $mffile, ".mf";
+$filebase = $fileparts[0];
+$filedir = $fileparts[1];
+$filenoext = File::Spec->catfile ($filedir, $filebase);
+$pt1file = $filebase . ".pt1";
+$pfbfile = $filebase . ".pfb";
+
+assign_default $bpppix, $opthash{bpppix}, 0.02;
+
+# Make our first pass through the input, to set values for various options.
+$mag = 100; # Get a more precise bounding box.
+get_bboxes(1); # This might set $designsize.
+
+# Sanity-check the specified precision.
+assign_default $rounding, $opthash{rounding}, 1;
+if ($rounding<=0.0 || $rounding>1.0) {
+ die sprintf "%s: Invalid rounding amount \"%g\"; value must be a positive number no greater than 1.0\n", $progname, $rounding;
+}
+
+# Ensure that every user-definable parameter is assigned a value.
+assign_default $fontversion, $opthash{fontversion}, "001.000";
+assign_default $creationdate, scalar localtime;
+assign_default $comment, $opthash{comment}, "Font converted to Type 1 by mf2pt1, written by Scott Pakin.";
+assign_default $weight, $opthash{weight}, "Medium";
+assign_default $fixedpitch, $opthash{fixedpitch}, 0;
+assign_default $uniqueID, $opthash{uniqueid}, int(rand(1000000)) + 4000000;
+assign_default $designsize, $opthash{designsize};
+die "${progname}: a design size must be specified in $mffile or on the command line\n" if !defined $designsize;
+die "${progname}: the design size must be a positive number\n" if $designsize<=0.0;
+assign_default $underlinepos, $opthash{underpos}, -1;
+$underlinepos = round(1000*$underlinepos/$designsize);
+assign_default $underlinethick, $opthash{underthick}, 0.5;
+$underlinethick = round(1000*$underlinethick/$designsize);
+assign_default $fullname, $opthash{fullname}, $filebase;
+assign_default $familyname, $opthash{family}, $fullname;
+assign_default $italicangle, $opthash{italicangle}, 0;
+assign_default $fontname, $opthash{name}, "$familyname-$weight";
+$fontname =~ s/\s//g;
+assign_default $encoding, $opthash{encoding}, "standard";
+my $encoding_name = $encoding;
+# Resolve $encoding from a name/filename to an array reference.
+ENCODING:
+{
+ if (-e $encoding) {
+ # Filenames take precedence over built-in encodings.
+ my @enc_array;
+ open (ENCFILE, "<$encoding") || die "${progname}: $! ($encoding)\n";
+ while (my $oneline = <ENCFILE>) {
+ $oneline =~ s/\%.*$//;
+ foreach my $word (split " ", $oneline) {
+ push @enc_array, substr($word, 1) if substr($word, 0, 1) eq "/";
+ }
+ }
+ close ENCFILE;
+ $encoding_name = substr (shift @enc_array, 1);
+ $encoding = \@enc_array;
+ last ENCODING;
+ }
+ $encoding=\@standardencoding, last ENCODING if $encoding eq "standard";
+ $encoding=\@isolatin1encoding, last ENCODING if $encoding eq "isolatin1";
+ $encoding=\@ot1encoding, last ENCODING if $encoding eq "ot1";
+ $encoding=\@t1encoding, last ENCODING if $encoding eq "t1";
+ $encoding=\@glyphname, last ENCODING if $encoding eq "asis";
+ warn "${progname}: Unknown encoding \"$encoding\"; using standard Adobe encoding\n";
+ $encoding=\@standardencoding; # Default to standard encoding
+}
+# NOTE(review): duplicate of the $fixedpitch default assigned above.
+assign_default $fixedpitch, $opthash{fixedpitch}, 0;
+$fixedpitch = $fixedpitch ? "true" : "false";
+assign_default $ffscript, $opthash{ffscript};
+
+# Output the final values of all of our parameters.
+print "\n";
+print <<"PARAMVALUES";
+mf2pt1 is using the following font parameters:
+ font_version: $fontversion
+ font_comment: $comment
+ font_family: $familyname
+ font_weight: $weight
+ font_identifier: $fullname
+ font_fixed_pitch: $fixedpitch
+ font_slant: $italicangle
+ font_underline_position: $underlinepos
+ font_underline_thickness: $underlinethick
+ font_name: $fontname
+ font_unique_id: $uniqueID
+ font_size: $designsize (bp)
+ font_coding_scheme: $encoding_name
+PARAMVALUES
+ ;
+print "\n";
+
+# Scale by a factor of 1000/design size.
+$mag = 1000.0 / $designsize;
+get_bboxes(0);
+print "\n";
+
+# Output the font in disassembled format.
+open (OUTFILE, ">$pt1file") || die "${progname}: $! ($pt1file)\n";
+output_header();
+printf OUTFILE "2 index /CharStrings %d dict dup begin\n",
+ 1+scalar(grep {defined($_)} @charbbox);
+output_font_programs();
+output_trailer();
+close OUTFILE;
+unlink @charfiles;
+print "\n";
+
+# Convert from the disassembled font format to Type 1 binary format.
+if (!execute_command 0, ("t1asm", $pt1file, $pfbfile)) {
+ die "${progname}: You'll need either to install t1utils and rerun $progname or find another way to convert $pt1file to $pfbfile\n";
+ # NOTE(review): unreachable — die above already terminates.
+ exit 1;
+}
+print "\n";
+unlink $pt1file;
+
+# Use FontForge to autohint the result.
+my $user_script = 0; # 1=script file was provided by the user; 0=created here
+if (defined $ffscript) {
+ # The user provided his own script.
+ $user_script = 1;
+}
+else {
+ # Create a FontForge script file.
+ $ffscript = $filebase . ".pe";
+ open (FFSCRIPT, ">$ffscript") || die "${progname}: $! ($ffscript)\n";
+ print FFSCRIPT <<'AUTOHINT';
+Open($1);
+SelectAll();
+RemoveOverlap();
+AddExtrema();
+Simplify(0, 2);
+CorrectDirection();
+Simplify(0, 2);
+RoundToInt();
+AutoHint();
+Generate($1);
+Quit(0);
+AUTOHINT
+ ;
+ close FFSCRIPT;
+}
+if (!execute_command 0, ("fontforge", "-script", $ffscript, $pfbfile)) {
+ warn "${progname}: You'll need to install FontForge if you want $pfbfile autohinted (not required, but strongly recommended)\n";
+}
+unlink $ffscript if !$user_script;
+print "\n";
+
+# Finish up.
+print "*** Successfully generated $pfbfile! ***\n";
+exit 0;
+
+######################################################################
+
+__END__
+
+=head1 NAME
+
+mf2pt1 - produce a PostScript Type 1 font program from a Metafont source
+
+
+=head1 SYNOPSIS
+
+mf2pt1
+[B<--help>]
+[B<--version>]
+[B<--comment>=I<string>]
+[B<--designsize>=I<number>]
+[B<--encoding>=I<encoding>]
+[B<--family>=I<name>]
+[B<-->[B<no>]B<fixedpitch>]
+[B<--fontversion>=I<MMM.mmm>]
+[B<--fullname>=I<name>]
+[B<--italicangle>=I<number>]
+[B<--name>=I<name>]
+[B<--underpos>=I<number>]
+[B<--underthick>=I<number>]
+[B<--uniqueid>=I<number>]
+[B<--weight>=I<weight>]
+[B<--rounding>=I<number>]
+[B<--bpppix>=I<number>]
+[B<--ffscript>=I<file.pe>]
+I<infile>.mf
+
+
+=head1 WARNING
+
+The B<mf2pt1> Info file is the main source of documentation for
+B<mf2pt1>. This man page is merely a brief summary.
+
+
+=head1 DESCRIPTION
+
+B<mf2pt1> facilitates producing PostScript Type 1 fonts from a
+Metafont source file. It is I<not>, as the name may imply, an
+automatic converter of arbitrary Metafont fonts to Type 1 format.
+B<mf2pt1> imposes a number of restrictions on the Metafont input. If
+these restrictions are met, B<mf2pt1> will produce valid Type 1
+output. (Actually, it produces "disassembled" Type 1; the B<t1asm>
+program from the B<t1utils> suite will convert this to a true Type 1
+font.)
+
+=head2 Usage
+
+ mf2pt1 myfont.mf
+
+=head1 OPTIONS
+
+Font parameters are best specified within a Metafont program. If
+necessary, though, command-line options can override any of these
+parameters. The B<mf2pt1> Info page, the primary source of B<mf2pt1>
+documentation, describes the following in greater detail.
+
+=over 4
+
+=item B<--help>
+
+Provide help on B<mf2pt1>'s command-line options.
+
+=item B<--version>
+
+Output the B<mf2pt1> version number, copyright, and license.
+
+=item B<--comment>=I<string>
+
+Include a font comment, usually a copyright notice.
+
+=item B<--designsize>=I<number>
+
+Specify the font design size in points.
+
+=item B<--encoding>=I<encoding>
+
+Designate the font encoding, either the name of a---typically
+F<.enc>---file which contains a PostScript font-encoding vector or one
+of C<standard> (the default), C<ot1>, C<t1>, or C<isolatin1>.
+
+=item B<--family>=I<name>
+
+Specify the font family.
+
+=item B<--fixedpitch>, B<--nofixedpitch>
+
+Assert that the font uses either monospaced (B<--fixedpitch>) or
+proportional (B<--nofixedpitch>) character widths.
+
+=item B<--fontversion>=I<MMM.mmm>
+
+Specify the font's major and minor version number.
+
+=item B<--fullname>=I<name>
+
+Designate the full font name (family plus modifiers).
+
+=item B<--italicangle>=I<number>
+
+Designate the italic angle in degrees counterclockwise from vertical.
+
+=item B<--name>=I<name>
+
+Provide the font name.
+
+=item B<--underpos>=I<number>
+
+Specify the vertical position of the underline in thousandths of the
+font height.
+
+=item B<--underthick>=I<number>
+
+Specify the thickness of the underline in thousandths of the font
+height.
+
+=item B<--uniqueid>=I<number>
+
+Specify a globally unique font identifier.
+
+=item B<--weight>=I<weight>
+
+Provide a description of the font weight (e.g., ``Heavy'').
+
+=item B<--rounding>=I<number>
+
+Specify the fraction of a font unit (0.0 < I<number> <= 1.0) to which
+to round coordinate values [default: 1.0].
+
+=item B<--bpppix>=I<number>
+
+Redefine the number of big points per pixel from 0.02 to I<number>.
+
+=item B<--ffscript>=I<file.pe>
+
+Name a script to pass to FontForge.
+
+=back
+
+
+=head1 FILES
+
+F<mf2pt1.mem> (which is generated from F<mf2pt1.mp> and F<mfplain.mp>)
+
+
+=head1 NOTES
+
+As stated in L</"WARNING">, the complete source of documentation for
+B<mf2pt1> is the Info page, not this man page.
+
+
+=head1 SEE ALSO
+
+mf(1), mpost(1), t1asm(1), fontforge(1)
+
+
+=head1 AUTHOR
+
+Scott Pakin, I<scott+mf@pakin.org>
--- /dev/null
+#!@PYTHON@
+# mutopia-index.py
+
+import fnmatch
+import getopt
+import os
+import re
+import stat
+import sys
+
+def find (pat, dir):
+ f = os.popen ('find %s -name "%s"'% (dir, pat))
+ lst = []
+ for a in f.readlines():
+ a = a[:-1]
+ lst.append (a)
+ return lst
+
+
+junk_prefix = 'out-www/'
+
+headertext= r"""
+
+<h1>LilyPond samples</h1>
+
+
+<p>You are looking at a page with some LilyPond samples. These files
+are also included in the distribution. The output is completely
+generated from the source file, without any further touch up.
+
+<p>
+
+The pictures are 90 dpi anti-aliased snapshots of the printed output.
+For a good impression of the quality print out the PDF file.
+"""
+
+headertext_nopics= r"""
+<p>No examples were found in this directory.
+"""
+
+#
+# FIXME breaks on multiple strings.
+#
+def read_lilypond_header (fn):
+ s = open (fn).read ()
+ s = re.sub ('%.*$', '', s)
+ s = re.sub ('\n', ' ', s)
+
+ dict = {}
+ m = re.search (r"""\\header\s*{([^}]*)}""", s)
+
+ if m:
+ s = m.group (1)
+ else:
+ return dict
+
+ while s:
+ m = re.search (r'''\s*(\S+)\s*=\s*"([^"]+)"''', s)
+ if m == None:
+ s = ''
+ else:
+ s = s[m.end (0):]
+ left = m.group (1)
+ right = m.group (2)
+
+ left = re.sub ('"', '', left)
+ right = re.sub ('"', '', right)
+ dict[left] = right
+
+ return dict
+
+def help ():
+ sys.stdout.write (r'''Usage: mutopia-index [OPTIONS] INFILE OUTFILE
+Generate index for mutopia.
+
+Options:
+ -h, --help print this help
+ -o, --output=FILE write output to file
+ -s, --subdirs=DIR add subdir
+ --suffix=SUF specify suffix
+
+''')
+ sys.exit (0)
+
+# ugh.
+def gen_list (inputs, file_name):
+ sys.stderr.write ("generating HTML list %s" % file_name)
+ sys.stderr.write ('\n')
+ if file_name:
+ list = open (file_name, 'w')
+ else:
+ list = sys.stdout
+ list.write ('''<html><head><title>Rendered Examples</title>
+<style type="text/css">
+hr { border:0; height:1; color: #000000; background-color: #000000; }\n
+</style>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+</head>''')
+
+ list.write ('<body bgcolor=white>\n')
+
+ if inputs:
+ list.write (headertext)
+ else:
+ list.write (headertext_nopics)
+
+ for ex in inputs:
+ print ex
+
+ (base, ext) = os.path.splitext (ex)
+ (base, ext2) = os.path.splitext (base)
+ ext = ext2 + ext
+
+ header = read_lilypond_header (ex)
+ head = header.get ('title', os.path.basename (base))
+ composer = header.get ('composer', '')
+ desc = header.get ('description', '')
+ list.write ('<hr>\n')
+ list.write ('<h1>%s</h1>\n' % head);
+ if composer:
+ list.write ('<h2>%s</h2>\n' % composer)
+ if desc:
+ list.write ('%s<p>' % desc)
+ list.write ('<ul>\n')
+
+ def list_item (file_name, desc, type, lst = list):
+ if os.path.isfile (file_name):
+ lst.write ('<li><a href="%s">%s</a>'
+ % (re.sub (junk_prefix, '', file_name), desc))
+
+ # FIXME: include warning if it uses \include
+ # files.
+
+ size = os.stat (file_name)[stat.ST_SIZE]
+ kB = (size + 512) / 1024
+ if kB:
+ lst.write (' (%s %d kB)' % (type, kB))
+ else:
+ lst.write (' (%s %d characters)'
+ % (type, size))
+ pictures = ['jpeg', 'png', 'xpm']
+ lst.write ('\n')
+ else:
+ print "cannot find" , `file_name`
+
+ list_item (base + ext, 'The input', 'ASCII')
+
+ pages_found = 0
+ for page in range (1, 100):
+ f = base + '-page%d.png' % page
+
+ if not os.path.isfile (f):
+ break
+ pages_found += 1
+ list_item (f, 'See a picture of page %d' % page, 'png')
+
+ if pages_found == 0 and os.path.exists (base + '.png'):
+ list_item (base + ".png",
+ 'See a picture', 'png')
+
+
+ list_item (base + '.pdf', 'Print', 'PDF')
+ list_item (base + '.midi', 'Listen', 'MIDI')
+ list.write ('</ul>\n');
+
+ list.write ('</body></html>\n');
+ list.close ()
+
+(options, files) = getopt.getopt (sys.argv[1:],
+ 'ho:', ['help', 'output='])
+outfile = 'examples.html'
+
+subdirs = []
+for (o, a) in options:
+ if o == '--help' or o == '-h':
+ help ()
+ elif o == '--output' or o == '-o':
+ outfile = a
+
+dirs = []
+for f in files:
+ dirs += find ('out-www', f)
+
+if not dirs:
+ dirs = ['.']
+
+allfiles = []
+
+for d in dirs:
+ allfiles += find ('*.ly', d)
+
+allfiles = [f for f in allfiles
+ if not f.endswith ('snippet-map.ly')
+ and not re.search ('lily-[0-9a-f]+', f)
+ and 'musicxml' not in f]
+
+gen_list (allfiles, outfile)
--- /dev/null
+#!@PYTHON@
+import sys
+import optparse
+import os
+import math
+
+## so we can call directly as scripts/build/output-distance.py
+me_path = os.path.abspath (os.path.split (sys.argv[0])[0])
+sys.path.insert (0, me_path + '/../python/')
+sys.path.insert (0, me_path + '/../python/out/')
+
+
+X_AXIS = 0
+Y_AXIS = 1
+INFTY = 1e6
+
+OUTPUT_EXPRESSION_PENALTY = 1
+ORPHAN_GROB_PENALTY = 1
+options = None
+
+################################################################
+# system interface.
+temp_dir = None
+class TempDirectory:
+ def __init__ (self):
+ import tempfile
+ self.dir = tempfile.mkdtemp ()
+ print 'dir is', self.dir
+ def __del__ (self):
+ print 'rm -rf %s' % self.dir
+ os.system ('rm -rf %s' % self.dir)
+ def __call__ (self):
+ return self.dir
+
+
+def get_temp_dir ():
+ global temp_dir
+ if not temp_dir:
+ temp_dir = TempDirectory ()
+ return temp_dir ()
+
+def read_pipe (c):
+ print 'pipe' , c
+ return os.popen (c).read ()
+
+def system (c):
+ print 'system' , c
+ s = os.system (c)
+ if s :
+ raise Exception ("failed")
+ return
+
+def shorten_string (s):
+ threshold = 15
+ if len (s) > 2*threshold:
+ s = s[:threshold] + '..' + s[-threshold:]
+ return s
+
+def max_distance (x1, x2):
+ dist = 0.0
+
+ for (p,q) in zip (x1, x2):
+ dist = max (abs (p-q), dist)
+
+ return dist
+
+
+def compare_png_images (old, new, dest_dir):
+ def png_dims (f):
+ m = re.search ('([0-9]+) x ([0-9]+)', read_pipe ('file %s' % f))
+
+ return tuple (map (int, m.groups ()))
+
+ dest = os.path.join (dest_dir, new.replace ('.png', '.compare.jpeg'))
+ try:
+ dims1 = png_dims (old)
+ dims2 = png_dims (new)
+ except AttributeError:
+ ## hmmm. what to do?
+ system ('touch %(dest)s' % locals ())
+ return
+
+ dims = (min (dims1[0], dims2[0]),
+ min (dims1[1], dims2[1]))
+
+ dir = get_temp_dir ()
+ system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop1.png' % (dims + (old, dir)))
+ system ('convert -depth 8 -crop %dx%d+0+0 %s %s/crop2.png' % (dims + (new, dir)))
+
+ system ('compare -depth 8 %(dir)s/crop1.png %(dir)s/crop2.png %(dir)s/diff.png' % locals ())
+
+ system ("convert -depth 8 %(dir)s/diff.png -blur 0x3 -negate -channel alpha,blue -type TrueColorMatte -fx 'intensity' %(dir)s/matte.png" % locals ())
+
+ system ("composite -compose atop -quality 65 %(dir)s/matte.png %(new)s %(dest)s" % locals ())
+
+
+################################################################
+# interval/bbox arithmetic.
+
+empty_interval = (INFTY, -INFTY)
+empty_bbox = (empty_interval, empty_interval)
+
+def interval_is_empty (i):
+ return i[0] > i[1]
+
+def interval_length (i):
+ return max (i[1]-i[0], 0)
+
+def interval_union (i1, i2):
+ return (min (i1[0], i2[0]),
+ max (i1[1], i2[1]))
+
+def interval_intersect (i1, i2):
+ return (max (i1[0], i2[0]),
+ min (i1[1], i2[1]))
+
+def bbox_is_empty (b):
+ return (interval_is_empty (b[0])
+ or interval_is_empty (b[1]))
+
+def bbox_union (b1, b2):
+ return (interval_union (b1[X_AXIS], b2[X_AXIS]),
+ interval_union (b2[Y_AXIS], b2[Y_AXIS]))
+
+def bbox_intersection (b1, b2):
+ return (interval_intersect (b1[X_AXIS], b2[X_AXIS]),
+ interval_intersect (b2[Y_AXIS], b2[Y_AXIS]))
+
+def bbox_area (b):
+ return interval_length (b[X_AXIS]) * interval_length (b[Y_AXIS])
+
+def bbox_diameter (b):
+ return max (interval_length (b[X_AXIS]),
+ interval_length (b[Y_AXIS]))
+
+
+def difference_area (a, b):
+ return bbox_area (a) - bbox_area (bbox_intersection (a,b))
+
+class GrobSignature:
+ def __init__ (self, exp_list):
+ (self.name, self.origin, bbox_x,
+ bbox_y, self.output_expression) = tuple (exp_list)
+
+ self.bbox = (bbox_x, bbox_y)
+ self.centroid = (bbox_x[0] + bbox_x[1], bbox_y[0] + bbox_y[1])
+
+ def __repr__ (self):
+ return '%s: (%.2f,%.2f), (%.2f,%.2f)\n' % (self.name,
+ self.bbox[0][0],
+ self.bbox[0][1],
+ self.bbox[1][0],
+ self.bbox[1][1])
+
+ def axis_centroid (self, axis):
+ return apply (sum, self.bbox[axis]) / 2
+
+ def centroid_distance (self, other, scale):
+ return max_distance (self.centroid, other.centroid) / scale
+
+ def bbox_distance (self, other):
+ divisor = bbox_area (self.bbox) + bbox_area (other.bbox)
+
+ if divisor:
+ return (difference_area (self.bbox, other.bbox) +
+ difference_area (other.bbox, self.bbox)) / divisor
+ else:
+ return 0.0
+
+ def expression_distance (self, other):
+ if self.output_expression == other.output_expression:
+ return 0
+ else:
+ return 1
+
+################################################################
+# single System.
+
+class SystemSignature:
+ def __init__ (self, grob_sigs):
+ d = {}
+ for g in grob_sigs:
+ val = d.setdefault (g.name, [])
+ val += [g]
+
+ self.grob_dict = d
+ self.set_all_bbox (grob_sigs)
+
+ def set_all_bbox (self, grobs):
+ self.bbox = empty_bbox
+ for g in grobs:
+ self.bbox = bbox_union (g.bbox, self.bbox)
+
+ def closest (self, grob_name, centroid):
+ min_d = INFTY
+ min_g = None
+ try:
+ grobs = self.grob_dict[grob_name]
+
+ for g in grobs:
+ d = max_distance (g.centroid, centroid)
+ if d < min_d:
+ min_d = d
+ min_g = g
+
+
+ return min_g
+
+ except KeyError:
+ return None
+ def grobs (self):
+ return reduce (lambda x,y: x+y, self.grob_dict.values(), [])
+
+################################################################
+## comparison of systems.
+
+class SystemLink:
+ def __init__ (self, system1, system2):
+ self.system1 = system1
+ self.system2 = system2
+
+ self.link_list_dict = {}
+ self.back_link_dict = {}
+
+
+ ## pairs
+ self.orphans = []
+
+ ## pair -> distance
+ self.geo_distances = {}
+
+ ## pairs
+ self.expression_changed = []
+
+ self._geometric_distance = None
+ self._expression_change_count = None
+ self._orphan_count = None
+
+ for g in system1.grobs ():
+
+ ## skip empty bboxes.
+ if bbox_is_empty (g.bbox):
+ continue
+
+ closest = system2.closest (g.name, g.centroid)
+
+ self.link_list_dict.setdefault (closest, [])
+ self.link_list_dict[closest].append (g)
+ self.back_link_dict[g] = closest
+
+
+ def calc_geometric_distance (self):
+ total = 0.0
+ for (g1,g2) in self.back_link_dict.items ():
+ if g2:
+ d = g1.bbox_distance (g2)
+ if d:
+ self.geo_distances[(g1,g2)] = d
+
+ total += d
+
+ self._geometric_distance = total
+
+ def calc_orphan_count (self):
+ count = 0
+ for (g1, g2) in self.back_link_dict.items ():
+ if g2 == None:
+ self.orphans.append ((g1, None))
+
+ count += 1
+
+ self._orphan_count = count
+
+ def calc_output_exp_distance (self):
+ d = 0
+ for (g1,g2) in self.back_link_dict.items ():
+ if g2:
+ d += g1.expression_distance (g2)
+
+ self._expression_change_count = d
+
+ def output_expression_details_string (self):
+ return ', '.join ([g1.name for g1 in self.expression_changed])
+
+ def geo_details_string (self):
+ results = [(d, g1,g2) for ((g1, g2), d) in self.geo_distances.items()]
+ results.sort ()
+ results.reverse ()
+
+ return ', '.join (['%s: %f' % (g1.name, d) for (d, g1, g2) in results])
+
+ def orphan_details_string (self):
+ return ', '.join (['%s-None' % g1.name for (g1,g2) in self.orphans if g2==None])
+
+ def geometric_distance (self):
+ if self._geometric_distance == None:
+ self.calc_geometric_distance ()
+ return self._geometric_distance
+
+ def orphan_count (self):
+ if self._orphan_count == None:
+ self.calc_orphan_count ()
+
+ return self._orphan_count
+
+ def output_expression_change_count (self):
+ if self._expression_change_count == None:
+ self.calc_output_exp_distance ()
+ return self._expression_change_count
+
+ def distance (self):
+ return (self.output_expression_change_count (),
+ self.orphan_count (),
+ self.geometric_distance ())
+
+def read_signature_file (name):
+ print 'reading', name
+
+ entries = open (name).read ().split ('\n')
+ def string_to_tup (s):
+ return tuple (map (float, s.split (' ')))
+
+ def string_to_entry (s):
+ fields = s.split('@')
+ fields[2] = string_to_tup (fields[2])
+ fields[3] = string_to_tup (fields[3])
+
+ return tuple (fields)
+
+ entries = [string_to_entry (e) for e in entries
+ if e and not e.startswith ('#')]
+
+ grob_sigs = [GrobSignature (e) for e in entries]
+ sig = SystemSignature (grob_sigs)
+ return sig
+
+
+################################################################
+# different systems of a .ly file.
+
+hash_to_original_name = {}
+
+class FileLink:
+ def __init__ (self, f1, f2):
+ self._distance = None
+ self.file_names = (f1, f2)
+
+ def text_record_string (self):
+ return '%-30f %-20s\n' % (self.distance (),
+ self.name ()
+ + os.path.splitext (self.file_names[1])[1]
+ )
+
+ def calc_distance (self):
+ return 0.0
+
+ def distance (self):
+ if self._distance == None:
+ self._distance = self.calc_distance ()
+
+ return self._distance
+
+ def source_file (self):
+ for ext in ('.ly', '.ly.txt'):
+ base = os.path.splitext (self.file_names[1])[0]
+ f = base + ext
+ if os.path.exists (f):
+ return f
+
+ return ''
+
+ def name (self):
+ base = os.path.basename (self.file_names[1])
+ base = os.path.splitext (base)[0]
+ base = hash_to_original_name.get (base, base)
+ base = os.path.splitext (base)[0]
+ return base
+
+ def extension (self):
+ return os.path.splitext (self.file_names[1])[1]
+
+ def link_files_for_html (self, dest_dir):
+ for f in self.file_names:
+ link_file (f, os.path.join (dest_dir, f))
+
+ def get_distance_details (self):
+ return ''
+
+ def get_cell (self, oldnew):
+ return ''
+
+ def get_file (self, oldnew):
+ return self.file_names[oldnew]
+
+ def html_record_string (self, dest_dir):
+ dist = self.distance()
+
+ details = self.get_distance_details ()
+ if details:
+ details_base = os.path.splitext (self.file_names[1])[0]
+ details_base += '.details.html'
+ fn = dest_dir + '/' + details_base
+ open_write_file (fn).write (details)
+
+ details = '<br>(<a href="%(details_base)s">details</a>)' % locals ()
+
+ cell1 = self.get_cell (0)
+ cell2 = self.get_cell (1)
+
+ name = self.name () + self.extension ()
+ file1 = self.get_file (0)
+ file2 = self.get_file (1)
+
+ return '''<tr>
+<td>
+%(dist)f
+%(details)s
+</td>
+<td>%(cell1)s<br><font size=-2><a href="%(file1)s"><tt>%(name)s</tt></font></td>
+<td>%(cell2)s<br><font size=-2><a href="%(file2)s"><tt>%(name)s</tt></font></td>
+</tr>''' % locals ()
+
+
+class FileCompareLink (FileLink):
+ def __init__ (self, f1, f2):
+ FileLink.__init__ (self, f1, f2)
+ self.contents = (self.get_content (self.file_names[0]),
+ self.get_content (self.file_names[1]))
+
+
+ def calc_distance (self):
+ ## todo: could use import MIDI to pinpoint
+ ## what & where changed.
+
+ if self.contents[0] == self.contents[1]:
+ return 0.0
+ else:
+ return 100.0;
+
+ def get_content (self, f):
+ print 'reading', f
+ s = open (f).read ()
+ return s
+
+
+class GitFileCompareLink (FileCompareLink):
+ def get_cell (self, oldnew):
+ str = self.contents[oldnew]
+
+ # truncate long lines
+ str = '\n'.join ([l[:80] for l in str.split ('\n')])
+
+
+ str = '<font size="-2"><pre>%s</pre></font>' % str
+ return str
+
+ def calc_distance (self):
+ if self.contents[0] == self.contents[1]:
+ d = 0.0
+ else:
+ d = 1.0001 *options.threshold
+
+ return d
+
+
+class TextFileCompareLink (FileCompareLink):
+ def calc_distance (self):
+ import difflib
+ diff = difflib.unified_diff (self.contents[0].strip().split ('\n'),
+ self.contents[1].strip().split ('\n'),
+ fromfiledate = self.file_names[0],
+ tofiledate = self.file_names[1]
+ )
+
+ self.diff_lines = [l for l in diff]
+ self.diff_lines = self.diff_lines[2:]
+
+ return math.sqrt (float (len ([l for l in self.diff_lines if l[0] in '-+'])))
+
+ def get_cell (self, oldnew):
+ str = ''
+ if oldnew == 1:
+ str = '\n'.join ([d.replace ('\n','') for d in self.diff_lines])
+ str = '<font size="-2"><pre>%s</pre></font>' % str
+ return str
+
+class LogFileCompareLink (TextFileCompareLink):
+ def get_content (self, f):
+ c = TextFileCompareLink.get_content (self, f)
+ c = re.sub ("\nProcessing `[^\n]+'\n", '', c)
+ return c
+
+class ProfileFileLink (FileCompareLink):
+ def __init__ (self, f1, f2):
+ FileCompareLink.__init__ (self, f1, f2)
+ self.results = [{}, {}]
+
+ def get_cell (self, oldnew):
+ str = ''
+ for k in ('time', 'cells'):
+ if oldnew==0:
+ str += '%-8s: %d\n' % (k, int (self.results[oldnew][k]))
+ else:
+ str += '%-8s: %8d (%5.3f)\n' % (k, int (self.results[oldnew][k]),
+ self.get_ratio (k))
+
+ return '<pre>%s</pre>' % str
+
+ def get_ratio (self, key):
+ (v1,v2) = (self.results[0].get (key, -1),
+ self.results[1].get (key, -1))
+
+ if v1 <= 0 or v2 <= 0:
+ return 0.0
+
+ return (v1 - v2) / float (v1+v2)
+
+ def calc_distance (self):
+ for oldnew in (0,1):
+ def note_info (m):
+ self.results[oldnew][m.group(1)] = float (m.group (2))
+
+ re.sub ('([a-z]+): ([-0-9.]+)\n',
+ note_info, self.contents[oldnew])
+
+ dist = 0.0
+ factor = {
+ 'time': 0.1,
+ 'cells': 5.0,
+ }
+
+ for k in ('time', 'cells'):
+ real_val = math.tan (self.get_ratio (k) * 0.5 * math.pi)
+ dist += math.exp (math.fabs (real_val) * factor[k]) - 1
+
+ dist = min (dist, 100)
+ return dist
+
+
+class MidiFileLink (TextFileCompareLink):
+ def get_content (self, oldnew):
+ import midi
+
+ data = FileCompareLink.get_content (self, oldnew)
+ midi = midi.parse (data)
+ tracks = midi[1]
+
+ str = ''
+ j = 0
+ for t in tracks:
+ str += 'track %d' % j
+ j += 1
+
+ for e in t:
+ ev_str = repr (e)
+ if re.search ('LilyPond [0-9.]+', ev_str):
+ continue
+
+ str += ' ev %s\n' % `e`
+ return str
+
+
+
+class SignatureFileLink (FileLink):
+ def __init__ (self, f1, f2 ):
+ FileLink.__init__ (self, f1, f2)
+ self.system_links = {}
+
+ def add_system_link (self, link, number):
+ self.system_links[number] = link
+
+ def calc_distance (self):
+ d = 0.0
+
+ orphan_distance = 0.0
+ for l in self.system_links.values ():
+ d = max (d, l.geometric_distance ())
+ orphan_distance += l.orphan_count ()
+
+ return d + orphan_distance
+
+ def add_file_compare (self, f1, f2):
+ system_index = []
+
+ def note_system_index (m):
+ system_index.append (int (m.group (1)))
+ return ''
+
+ base1 = re.sub ("-([0-9]+).signature", note_system_index, f1)
+ base2 = re.sub ("-([0-9]+).signature", note_system_index, f2)
+
+ self.base_names = (os.path.normpath (base1),
+ os.path.normpath (base2))
+
+ s1 = read_signature_file (f1)
+ s2 = read_signature_file (f2)
+
+ link = SystemLink (s1, s2)
+
+ self.add_system_link (link, system_index[0])
+
+
+ def create_images (self, dest_dir):
+
+ files_created = [[], []]
+ for oldnew in (0, 1):
+ pat = self.base_names[oldnew] + '.eps'
+
+ for f in glob.glob (pat):
+ infile = f
+ outfile = (dest_dir + '/' + f).replace ('.eps', '.png')
+ data_option = ''
+ if options.local_data_dir:
+ data_option = ('-slilypond-datadir=%s/../share/lilypond/current '
+ % os.path.dirname(infile))
+
+ mkdir (os.path.split (outfile)[0])
+ cmd = ('gs -sDEVICE=png16m -dGraphicsAlphaBits=4 -dTextAlphaBits=4 '
+ ' %(data_option)s '
+ ' -r101 '
+ ' -sOutputFile=%(outfile)s -dNOSAFER -dEPSCrop -q -dNOPAUSE '
+ ' %(infile)s -c quit ') % locals ()
+
+ files_created[oldnew].append (outfile)
+ system (cmd)
+
+ return files_created
+
+ def link_files_for_html (self, dest_dir):
+ FileLink.link_files_for_html (self, dest_dir)
+ to_compare = [[], []]
+
+ exts = []
+ if options.create_images:
+ to_compare = self.create_images (dest_dir)
+ else:
+ exts += ['.png', '-page*png']
+
+ for ext in exts:
+ for oldnew in (0,1):
+ for f in glob.glob (self.base_names[oldnew] + ext):
+ dst = dest_dir + '/' + f
+ link_file (f, dst)
+
+ if f.endswith ('.png'):
+ to_compare[oldnew].append (f)
+
+ if options.compare_images:
+ for (old, new) in zip (to_compare[0], to_compare[1]):
+ compare_png_images (old, new, dest_dir)
+
+
+ def get_cell (self, oldnew):
+ def img_cell (ly, img, name):
+ if not name:
+ name = 'source'
+ else:
+ name = '<tt>%s</tt>' % name
+
+ return '''
+<a href="%(img)s">
+<img src="%(img)s" style="border-style: none; max-width: 500px;">
+</a><br>
+''' % locals ()
+ def multi_img_cell (ly, imgs, name):
+ if not name:
+ name = 'source'
+ else:
+ name = '<tt>%s</tt>' % name
+
+ imgs_str = '\n'.join (['''<a href="%s">
+<img src="%s" style="border-style: none; max-width: 500px;">
+</a><br>''' % (img, img)
+ for img in imgs])
+
+
+ return '''
+%(imgs_str)s
+''' % locals ()
+
+
+
+ def cell (base, name):
+ pat = base + '-page*.png'
+ pages = glob.glob (pat)
+
+ if pages:
+ return multi_img_cell (base + '.ly', sorted (pages), name)
+ else:
+ return img_cell (base + '.ly', base + '.png', name)
+
+
+
+ str = cell (os.path.splitext (self.file_names[oldnew])[0], self.name ())
+ if options.compare_images and oldnew == 1:
+ str = str.replace ('.png', '.compare.jpeg')
+
+ return str
+
+
+ def get_distance_details (self):
+ systems = self.system_links.items ()
+ systems.sort ()
+
+ html = ""
+ for (c, link) in systems:
+ e = '<td>%d</td>' % c
+ for d in link.distance ():
+ e += '<td>%f</td>' % d
+
+ e = '<tr>%s</tr>' % e
+
+ html += e
+
+ e = '<td>%d</td>' % c
+ for s in (link.output_expression_details_string (),
+ link.orphan_details_string (),
+ link.geo_details_string ()):
+ e += "<td>%s</td>" % s
+
+
+ e = '<tr>%s</tr>' % e
+ html += e
+
+ original = self.name ()
+ html = '''<html>
+<head>
+<title>comparison details for %(original)s</title>
+</head>
+<body>
+<table border=1>
+<tr>
+<th>system</th>
+<th>output</th>
+<th>orphan</th>
+<th>geo</th>
+</tr>
+
+%(html)s
+</table>
+
+</body>
+</html>
+''' % locals ()
+ return html
+
+
+################################################################
+# Files/directories
+
+import glob
+import re
+
+def compare_signature_files (f1, f2):
+ s1 = read_signature_file (f1)
+ s2 = read_signature_file (f2)
+
+ return SystemLink (s1, s2).distance ()
+
+def paired_files (dir1, dir2, pattern):
+ """
+ Search DIR1 and DIR2 for PATTERN.
+
+ Return (PAIRED, MISSING-FROM-2, MISSING-FROM-1)
+
+ """
+
+ files = []
+ for d in (dir1,dir2):
+ found = [os.path.split (f)[1] for f in glob.glob (d + '/' + pattern)]
+ found = dict ((f, 1) for f in found)
+ files.append (found)
+
+ pairs = []
+ missing = []
+ for f in files[0]:
+ try:
+ files[1].pop (f)
+ pairs.append (f)
+ except KeyError:
+ missing.append (f)
+
+ return (pairs, files[1].keys (), missing)
+
+class ComparisonData:
+ def __init__ (self):
+ self.result_dict = {}
+ self.missing = []
+ self.added = []
+ self.file_links = {}
+
+ def read_sources (self):
+
+ ## ugh: drop the .ly.txt
+ for (key, val) in self.file_links.items ():
+
+ def note_original (match, ln=val):
+ key = ln.name ()
+ hash_to_original_name[key] = match.group (1)
+ return ''
+
+ sf = val.source_file ()
+ if sf:
+ re.sub (r'\\sourcefilename "([^"]+)"',
+ note_original, open (sf).read ())
+ else:
+ print 'no source for', val
+
+ def compare_trees (self, dir1, dir2):
+ self.compare_directories (dir1, dir2)
+
+ (root, dirs, files) = os.walk (dir1).next ()
+ for d in dirs:
+ d1 = os.path.join (dir1, d)
+ d2 = os.path.join (dir2, d)
+
+ if os.path.islink (d1) or os.path.islink (d2):
+ continue
+
+ if os.path.isdir (d2):
+ self.compare_trees (d1, d2)
+
+ def compare_directories (self, dir1, dir2):
+ for ext in ['signature',
+ 'midi',
+ 'log',
+ 'profile',
+ 'gittxt']:
+ (paired, m1, m2) = paired_files (dir1, dir2, '*.' + ext)
+
+ self.missing += [(dir1, m) for m in m1]
+ self.added += [(dir2, m) for m in m2]
+
+ for p in paired:
+ if (options.max_count
+ and len (self.file_links) > options.max_count):
+ continue
+
+ f2 = dir2 + '/' + p
+ f1 = dir1 + '/' + p
+ self.compare_files (f1, f2)
+
+ def compare_files (self, f1, f2):
+ if f1.endswith ('signature'):
+ self.compare_signature_files (f1, f2)
+ else:
+ ext = os.path.splitext (f1)[1]
+ klasses = {
+ '.midi': MidiFileLink,
+ '.log' : LogFileCompareLink,
+ '.profile': ProfileFileLink,
+ '.gittxt': GitFileCompareLink,
+ }
+
+ if klasses.has_key (ext):
+ self.compare_general_files (klasses[ext], f1, f2)
+
+ def compare_general_files (self, klass, f1, f2):
+ name = os.path.split (f1)[1]
+
+ file_link = klass (f1, f2)
+ self.file_links[name] = file_link
+
+ def compare_signature_files (self, f1, f2):
+ name = os.path.split (f1)[1]
+ name = re.sub ('-[0-9]+.signature', '', name)
+
+ file_link = None
+ try:
+ file_link = self.file_links[name]
+ except KeyError:
+ generic_f1 = re.sub ('-[0-9]+.signature', '.ly', f1)
+ generic_f2 = re.sub ('-[0-9]+.signature', '.ly', f2)
+ file_link = SignatureFileLink (generic_f1, generic_f2)
+ self.file_links[name] = file_link
+
+ file_link.add_file_compare (f1, f2)
+
+ def write_changed (self, dest_dir, threshold):
+ (changed, below, unchanged) = self.thresholded_results (threshold)
+
+ str = '\n'.join ([os.path.splitext (link.file_names[1])[0]
+ for link in changed])
+ fn = dest_dir + '/changed.txt'
+
+ open_write_file (fn).write (str)
+
+ def thresholded_results (self, threshold):
+ ## todo: support more scores.
+ results = [(link.distance(), link)
+ for link in self.file_links.values ()]
+ results.sort ()
+ results.reverse ()
+
+ unchanged = [r for (d,r) in results if d == 0.0]
+ below = [r for (d,r) in results if threshold >= d > 0.0]
+ changed = [r for (d,r) in results if d > threshold]
+
+ return (changed, below, unchanged)
+
+ def write_text_result_page (self, filename, threshold):
+ out = None
+ if filename == '':
+ out = sys.stdout
+ else:
+ print 'writing "%s"' % filename
+ out = open_write_file (filename)
+
+ (changed, below, unchanged) = self.thresholded_results (threshold)
+
+
+ for link in changed:
+ out.write (link.text_record_string ())
+
+ out.write ('\n\n')
+ out.write ('%d below threshold\n' % len (below))
+ out.write ('%d unchanged\n' % len (unchanged))
+
+ def create_text_result_page (self, dir1, dir2, dest_dir, threshold):
+ self.write_text_result_page (dest_dir + '/index.txt', threshold)
+
+ def create_html_result_page (self, dir1, dir2, dest_dir, threshold):
+ dir1 = dir1.replace ('//', '/')
+ dir2 = dir2.replace ('//', '/')
+
+ (changed, below, unchanged) = self.thresholded_results (threshold)
+
+
+ html = ''
+ old_prefix = os.path.split (dir1)[1]
+ for link in changed:
+ html += link.html_record_string (dest_dir)
+
+
+ short_dir1 = shorten_string (dir1)
+ short_dir2 = shorten_string (dir2)
+ html = '''<html>
+<table rules="rows" border bordercolor="blue">
+<tr>
+<th>distance</th>
+<th>%(short_dir1)s</th>
+<th>%(short_dir2)s</th>
+</tr>
+%(html)s
+</table>
+</html>''' % locals()
+
+ html += ('<p>')
+ below_count = len (below)
+
+ if below_count:
+ html += ('<p>%d below threshold</p>' % below_count)
+
+ html += ('<p>%d unchanged</p>' % len (unchanged))
+
+ dest_file = dest_dir + '/index.html'
+ open_write_file (dest_file).write (html)
+
+
+ for link in changed:
+ link.link_files_for_html (dest_dir)
+
+
+ def print_results (self, threshold):
+ self.write_text_result_page ('', threshold)
+
+def compare_trees (dir1, dir2, dest_dir, threshold):
+ data = ComparisonData ()
+ data.compare_trees (dir1, dir2)
+ data.read_sources ()
+
+
+ data.print_results (threshold)
+
+ if os.path.isdir (dest_dir):
+ system ('rm -rf %s '% dest_dir)
+
+ data.write_changed (dest_dir, threshold)
+ data.create_html_result_page (dir1, dir2, dest_dir, threshold)
+ data.create_text_result_page (dir1, dir2, dest_dir, threshold)
+
+################################################################
+# TESTING
+
+def mkdir (x):
+ if not os.path.isdir (x):
+ print 'mkdir', x
+ os.makedirs (x)
+
+def link_file (x, y):
+ mkdir (os.path.split (y)[0])
+ try:
+ print x, '->', y
+ os.link (x, y)
+ except OSError, z:
+ print 'OSError', x, y, z
+ raise OSError
+
+def open_write_file (x):
+ d = os.path.split (x)[0]
+ mkdir (d)
+ return open (x, 'w')
+
+
+def system (x):
+
+ print 'invoking', x
+ stat = os.system (x)
+ assert stat == 0
+
+
+def test_paired_files ():
+ print paired_files (os.environ["HOME"] + "/src/lilypond/scripts/",
+ os.environ["HOME"] + "/src/lilypond-stable/scripts/build/", '*.py')
+
+
+def test_compare_trees ():
+ system ('rm -rf dir1 dir2')
+ system ('mkdir dir1 dir2')
+ system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir1')
+ system ('cp 20{-*.signature,.ly,.png,.eps,.log,.profile} dir2')
+ system ('cp 20expr{-*.signature,.ly,.png,.eps,.log,.profile} dir1')
+ system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir2/')
+ system ('cp 19{-*.signature,.ly,.png,.eps,.log,.profile} dir1/')
+ system ('cp 19-1.signature 19.sub-1.signature')
+ system ('cp 19.ly 19.sub.ly')
+ system ('cp 19.profile 19.sub.profile')
+ system ('cp 19.log 19.sub.log')
+ system ('cp 19.png 19.sub.png')
+ system ('cp 19.eps 19.sub.eps')
+
+ system ('cp 20multipage* dir1')
+ system ('cp 20multipage* dir2')
+ system ('cp 19multipage-1.signature dir2/20multipage-1.signature')
+
+
+ system ('mkdir -p dir1/subdir/ dir2/subdir/')
+ system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir1/subdir/')
+ system ('cp 19.sub{-*.signature,.ly,.png,.eps,.log,.profile} dir2/subdir/')
+ system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir2/')
+ system ('cp 20grob{-*.signature,.ly,.png,.eps,.log,.profile} dir1/')
+ system ('echo HEAD is 1 > dir1/tree.gittxt')
+ system ('echo HEAD is 2 > dir2/tree.gittxt')
+
+ ## introduce differences
+ system ('cp 19-1.signature dir2/20-1.signature')
+ system ('cp 19.profile dir2/20.profile')
+ system ('cp 19.png dir2/20.png')
+ system ('cp 19multipage-page1.png dir2/20multipage-page1.png')
+ system ('cp 20-1.signature dir2/subdir/19.sub-1.signature')
+ system ('cp 20.png dir2/subdir/19.sub.png')
+ system ("sed 's/: /: 1/g' 20.profile > dir2/subdir/19.sub.profile")
+
+ ## radical diffs.
+ system ('cp 19-1.signature dir2/20grob-1.signature')
+ system ('cp 19-1.signature dir2/20grob-2.signature')
+ system ('cp 19multipage.midi dir1/midi-differ.midi')
+ system ('cp 20multipage.midi dir2/midi-differ.midi')
+ system ('cp 19multipage.log dir1/log-differ.log')
+ system ('cp 19multipage.log dir2/log-differ.log && echo different >> dir2/log-differ.log && echo different >> dir2/log-differ.log')
+
+ compare_trees ('dir1', 'dir2', 'compare-dir1dir2', options.threshold)
+
+
+def test_basic_compare ():
+ ly_template = r"""
+
+\version "2.10.0"
+#(define default-toplevel-book-handler
+ print-book-with-defaults-as-systems )
+
+#(ly:set-option (quote no-point-and-click))
+
+\sourcefilename "my-source.ly"
+
+%(papermod)s
+\header { tagline = ##f }
+\score {
+<<
+\new Staff \relative c {
+ c4^"%(userstring)s" %(extragrob)s
+ }
+\new Staff \relative c {
+ c4^"%(userstring)s" %(extragrob)s
+ }
+>>
+\layout{}
+}
+
+"""
+
+ dicts = [{ 'papermod' : '',
+ 'name' : '20',
+ 'extragrob': '',
+ 'userstring': 'test' },
+ { 'papermod' : '#(set-global-staff-size 19.5)',
+ 'name' : '19',
+ 'extragrob': '',
+ 'userstring': 'test' },
+ { 'papermod' : '',
+ 'name' : '20expr',
+ 'extragrob': '',
+ 'userstring': 'blabla' },
+ { 'papermod' : '',
+ 'name' : '20grob',
+ 'extragrob': 'r2. \\break c1',
+ 'userstring': 'test' },
+ ]
+
+ for d in dicts:
+ open (d['name'] + '.ly','w').write (ly_template % d)
+
+ names = [d['name'] for d in dicts]
+
+ system ('lilypond -ddump-profile -dseparate-log-files -ddump-signatures --png -dbackend=eps ' + ' '.join (names))
+
+
+ multipage_str = r'''
+ #(set-default-paper-size "a6")
+ \score {
+ \relative {c1 \pageBreak c1 }
+ \layout {}
+ \midi {}
+ }
+ '''
+
+ open ('20multipage.ly', 'w').write (multipage_str.replace ('c1', 'd1'))
+ open ('19multipage.ly', 'w').write ('#(set-global-staff-size 19.5)\n' + multipage_str)
+ system ('lilypond -dseparate-log-files -ddump-signatures --png 19multipage 20multipage ')
+
+ test_compare_signatures (names)
+
+def test_compare_signatures (names, timing=False):
+ import time
+
+ times = 1
+ if timing:
+ times = 100
+
+ t0 = time.clock ()
+
+ count = 0
+ for t in range (0, times):
+ sigs = dict ((n, read_signature_file ('%s-1.signature' % n)) for n in names)
+ count += 1
+
+ if timing:
+ print 'elapsed', (time.clock() - t0)/count
+
+
+ t0 = time.clock ()
+ count = 0
+ combinations = {}
+ for (n1, s1) in sigs.items():
+ for (n2, s2) in sigs.items():
+ combinations['%s-%s' % (n1, n2)] = SystemLink (s1,s2).distance ()
+ count += 1
+
+ if timing:
+ print 'elapsed', (time.clock() - t0)/count
+
+ results = combinations.items ()
+ results.sort ()
+ for k,v in results:
+ print '%-20s' % k, v
+
+ assert combinations['20-20'] == (0.0,0.0,0.0)
+ assert combinations['20-20expr'][0] > 0.0
+ assert combinations['20-19'][2] < 10.0
+ assert combinations['20-19'][2] > 0.0
+
+
+def run_tests ():
+ dir = 'test-output-distance'
+
+ do_clean = not os.path.exists (dir)
+
+ print 'test results in ', dir
+ if do_clean:
+ system ('rm -rf ' + dir)
+ system ('mkdir ' + dir)
+
+ os.chdir (dir)
+ if do_clean:
+ test_basic_compare ()
+
+ test_compare_trees ()
+
+################################################################
+#
+
+def main ():
+ p = optparse.OptionParser ("output-distance - compare LilyPond formatting runs")
+ p.usage = 'output-distance.py [options] tree1 tree2'
+
+ p.add_option ('', '--test-self',
+ dest="run_test",
+ action="store_true",
+ help='run test method')
+
+ p.add_option ('--max-count',
+ dest="max_count",
+ metavar="COUNT",
+ type="int",
+ default=0,
+ action="store",
+ help='only analyze COUNT signature pairs')
+
+ p.add_option ('', '--threshold',
+ dest="threshold",
+ default=0.3,
+ action="store",
+ type="float",
+ help='threshold for geometric distance')
+
+ p.add_option ('--no-compare-images',
+ dest="compare_images",
+ default=True,
+ action="store_false",
+ help="Don't run graphical comparisons")
+
+ p.add_option ('--create-images',
+ dest="create_images",
+ default=False,
+ action="store_true",
+ help="Create PNGs from EPSes")
+
+
+ p.add_option ('--local-datadir',
+ dest="local_data_dir",
+ default=False,
+ action="store_true",
+ help='whether to use the share/lilypond/ directory in the test directory')
+
+ p.add_option ('-o', '--output-dir',
+ dest="output_dir",
+ default=None,
+ action="store",
+ type="string",
+ help='where to put the test results [tree2/compare-tree1tree2]')
+
+ global options
+ (options, args) = p.parse_args ()
+
+ if options.run_test:
+ run_tests ()
+ sys.exit (0)
+
+ if len (args) != 2:
+ p.print_usage()
+ sys.exit (2)
+
+ name = options.output_dir
+ if not name:
+ name = args[0].replace ('/', '')
+ name = os.path.join (args[1], 'compare-' + shorten_string (name))
+
+ compare_trees (args[0], args[1], name, options.threshold)
+
+if __name__ == '__main__':
+ main()
+
--- /dev/null
+#!@PYTHON@
+
+import os
+import re
+import sys
+
+frm = re.compile (sys.argv[1], re.MULTILINE)
+to = sys.argv[2]
+
+if not sys.argv[3:] or sys.argv[3] == '-':
+ sys.stdout.write (re.sub (frm, to, sys.stdin.read ()))
+for file in sys.argv[3:]:
+ s = open (file).read ()
+ name = os.path.basename (file)
+ base, ext = os.path.splitext (name)
+ t = re.sub (frm, to % locals (), s)
+ if s != t:
+ if 1:
+ os.system ('mv %(file)s %(file)s~~' % locals ())
+ h = open (file, "w")
+ h.write (t)
+ h.close ()
+ else:
+ sys.stdout.write (t)
--- /dev/null
+#!@PYTHON@
+# -*- coding: utf-8 -*-
+# texi-gettext.py
+
+# USAGE: texi-gettext.py [-o OUTDIR] LANG FILES
+#
+# -o OUTDIR specifies that output files should be written in OUTDIR instead
+#
+
+print "texi_gettext.py"
+
+import sys
+import re
+import os
+import getopt
+
+import langdefs
+
+optlist, args = getopt.getopt (sys.argv[1:],'o:')
+lang = args[0]
+files = args[1:]
+
+outdir = '.'
+for x in optlist:
+ if x[0] == '-o':
+ outdir = x[1]
+
+double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
+_doc = langdefs.translation[lang]
+
+include_re = re.compile (r'@include ((?!../lily-).*?)\.texi$', re.M)
+whitespaces = re.compile (r'\s+')
+ref_re = re.compile (r'(?ms)@(rglos|ruser|rprogram|ref)(\{)(.*?)(\})')
+node_section_re = re.compile (r'@(node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading)( )(.*?)(\n)')
+menu_entry_re = re.compile (r'\* (.*?)::')
+
+def title_gettext (m):
+ if m.group (2) == '{':
+ r = whitespaces.sub (' ', m.group (3))
+ else:
+ r = m.group (3)
+ return '@' + m.group (1) + m.group (2) + _doc (r) + m.group (4)
+
+def menu_entry_gettext (m):
+ return '* ' + _doc (m.group (1)) + '::'
+
+def include_replace (m, filename):
+ if os.path.exists (os.path.join (os.path.dirname (filename), m.group(1)) + '.texi'):
+ return '@include ' + m.group(1) + '.pdftexi'
+ return m.group(0)
+
+def process_file (filename):
+ print "Processing %s" % filename
+ f = open (filename, 'r')
+ page = f.read ()
+ f.close()
+ page = node_section_re.sub (title_gettext, page)
+ page = ref_re.sub (title_gettext, page)
+ page = menu_entry_re.sub (menu_entry_gettext, page)
+ page = page.replace ("""-- SKELETON FILE --
+When you actually translate this file, please remove these lines as
+well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", '')
+ page = page.replace ('UNTRANSLATED NODE: IGNORE ME', _doc ("This section has not been translated yet; please refer to the manual in English."))
+ includes = include_re.findall (page)
+ page = include_re.sub (lambda m: include_replace (m, filename), page)
+ p = os.path.join (outdir, filename) [:-4] + 'pdftexi'
+ f = open (p, 'w')
+ f.write (page)
+ f.close ()
+ dir = os.path.dirname (filename)
+ for file in includes:
+ p = os.path.join (dir, file) + '.texi'
+ if os.path.exists (p):
+ process_file (p)
+
+for filename in files:
+ process_file (filename)
--- /dev/null
+#!@PYTHON@
+
+import getopt
+import os
+import re
+import sys
+import time
+
+def usage ():
+ sys.stderr.write ('''
+texi2omf [options] FILE.texi > FILE.omf
+
+Options:
+
+--format=FORM set format FORM (HTML, PS, PDF, [XML]).
+--location=FILE file name as installed on disk.
+--version=VERSION
+
+Use the following commands (enclose in @ignore)
+
+@omfsubject . .
+@omfdescription . .
+@omftype . .
+
+etc.
+
+
+''')
+
+(options, files) = getopt.getopt (sys.argv[1:], '',
+ ['format=', 'location=', 'version='])
+
+license = 'FDL'
+location = ''
+version = ''
+email = os.getenv ('MAILADDRESS')
+name = os.getenv ('USERNAME')
+format = 'xml'
+
+for (o, a) in options:
+ if o == '--format':
+ format = a
+ elif o == '--location':
+ location = 'file:%s' % a
+ elif o == '--version':
+ version = a
+ else:
+ assert 0
+
+
+if not files:
+ usage ()
+ sys.exit (2)
+
+
+formats = {
+ 'html' : 'text/html',
+ 'pdf' : 'application/pdf',
+ 'ps.gz' : 'application/postscript',
+ 'ps' : 'application/postscript',
+ 'xml' : 'text/xml',
+ }
+
+if not formats.has_key (format):
+ sys.stderr.write ("Format `%s' unknown\n" % format)
+ sys.exit (1)
+
+
+infile = files[0]
+
+today = time.localtime ()
+
+texi = open (infile).read ()
+
+if not location:
+ location = 'file:/%s' % re.sub (r'\..*', '.' + format, infile)
+
+omf_vars = {
+ 'date': '%d-%d-%d' % today[:3],
+ 'mimeformat': formats[format],
+ 'maintainer': "%s (%s)" % (name, email),
+ 'version' : version,
+ 'location' : location,
+ 'language' : 'C',
+ }
+
+omf_caterories = ['subject', 'creator', 'maintainer', 'contributor',
+ 'title', 'subtitle', 'version', 'category', 'type',
+ 'description', 'license', 'language',]
+
+for a in omf_caterories:
+ m = re.search ('@omf%s (.*)\n'% a, texi)
+ if m:
+ omf_vars[a] = m.group (1)
+ elif not omf_vars.has_key (a):
+ omf_vars[a] = ''
+
+if not omf_vars['title']:
+ title = ''
+ m = re.search ('@title (.*)\n', texi)
+ if m:
+ title = m.group (1)
+
+ subtitle = ''
+ m = re.search ('@subtitle (.*)\n', texi)
+ if m:
+ subtitle = m.group (1)
+
+ if subtitle:
+ title = '%s -- %s' % (title, subtitle)
+
+ omf_vars['title'] = title
+
+if not omf_vars['creator']:
+ m = re.search ('@author (.*)\n', texi)
+ if m:
+ omf_vars['creator'] = m.group (1)
+
+
+
+print r'''<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE omf PUBLIC "-//OMF//DTD Scrollkeeper OMF Variant V1.0//EN" "http://scrollkeeper.sourceforge.net/dtds/scrollkeeper-omf-1.0/scrollkeeper-omf.dtd">
+<omf>
+ <resource>
+ <creator>
+ %(creator)s
+ </creator>
+ <maintainer>
+ %(maintainer)s
+ </maintainer>
+ <title>
+ %(title)s
+ </title>
+ <date>
+ %(date)s
+ </date>
+ <version identifier="%(version)s" date="%(date)s" />
+ <subject category="%(category)s"/>
+ <description>
+ %(description)s
+ </description>
+ <type>
+ %(type)s
+ </type>
+ <format mime="%(mimeformat)s" />
+ <identifier url="%(location)s"/>
+ <language code="%(language)s"/>
+ <rights type="%(license)s" />
+ </resource>
+</omf>
+
+''' % omf_vars
+
+
--- /dev/null
+#!@PYTHON@
+
+## This is www_post.py. This script is the main stage
+## of the toplevel GNUmakefile's local-WWW-post target.
+
+# USAGE: www_post PACKAGE_NAME TOPLEVEL_VERSION OUTDIR TARGETS
+# please call me from top of the source directory
+
+import sys
+import os
+import re
+
+import langdefs
+
+import mirrortree
+import postprocess_html
+
+package_name, package_version, outdir, targets = sys.argv[1:]
+targets = targets.split (' ')
+outdir = os.path.normpath (outdir)
+doc_dirs = ['input', 'Documentation', outdir]
+target_pattern = os.path.join (outdir, '%s-root')
+
+# these redirection pages allow going back to the documentation index
+# from the HTML manuals/snippets pages
+static_files = {
+ os.path.join (outdir, 'index.html'):
+ '''<META HTTP-EQUIV="refresh" content="0;URL=Documentation/index.html">
+<html><body>Redirecting to the documentation index...</body></html>\n''',
+ os.path.join (outdir, 'VERSION'):
+ package_version + '\n',
+ os.path.join ('input', 'lsr', outdir, 'index.html'):
+ '''<META HTTP-EQUIV="refresh" content="0;URL=../../index.html">
+<html><body>Redirecting to the documentation index...</body></html>\n'''
+ }
+
+for l in langdefs.LANGUAGES:
+ static_files[os.path.join ('Documentation', 'user', outdir, l.file_name ('index', '.html'))] = \
+ '<META HTTP-EQUIV="refresh" content="0;URL=../' + l.file_name ('index', '.html') + \
+ '">\n<html><body>Redirecting to the documentation index...</body></html>\n'
+
+for f, contents in static_files.items ():
+ open (f, 'w').write (contents)
+
+sys.stderr.write ("Mirrorring...\n")
+dirs, symlinks, files = mirrortree.walk_tree (
+ tree_roots = doc_dirs,
+ process_dirs = outdir,
+ exclude_dirs = '(^|/)(' + r'|po|out|out-test|.*?[.]t2d|\w*?-root)(/|$)|Documentation/(' + '|'.join ([l.code for l in langdefs.LANGUAGES]) + ')',
+ find_files = r'.*?\.(?:midi|html|pdf|png|txt|i?ly|signature|css)$|VERSION',
+ exclude_files = r'lily-[0-9a-f]+.*\.(pdf|txt)')
+
+# actual mirroring stuff
+html_files = []
+hardlinked_files = []
+for f in files:
+ if f.endswith ('.html'):
+ html_files.append (f)
+ else:
+ hardlinked_files.append (f)
+dirs = [re.sub ('/' + outdir, '', d) for d in dirs]
+while outdir in dirs:
+ dirs.remove (outdir)
+dirs = list (set (dirs))
+dirs.sort ()
+
+strip_file_name = {}
+strip_re = re.compile (outdir + '/')
+for t in targets:
+ out_root = target_pattern % t
+ strip_file_name[t] = lambda s: os.path.join (target_pattern % t, (strip_re.sub ('', s)))
+ os.mkdir (out_root)
+ map (os.mkdir, [os.path.join (out_root, d) for d in dirs])
+ for f in hardlinked_files:
+ os.link (f, strip_file_name[t] (f))
+ for l in symlinks:
+ p = mirrortree.new_link_path (os.path.normpath (os.readlink (l)), os.path.dirname (l), strip_re)
+ dest = strip_file_name[t] (l)
+ if not os.path.exists (dest):
+ os.symlink (p, dest)
+
+ ## ad-hoc renaming to make xrefs between PDFs work
+ os.rename (os.path.join (out_root, 'input/lsr/lilypond-snippets.pdf'),
+ os.path.join (out_root, 'Documentation/user/lilypond-snippets.pdf'))
+
+# needed for content negotiation with the documentation index
+if 'online' in targets:
+ f = open (os.path.join (target_pattern % 'online', 'Documentation/.htaccess'), 'w')
+ f.write ('#.htaccess\nDirectoryIndex index\n')
+ f.close ()
+
+postprocess_html.build_pages_dict (html_files)
+for t in targets:
+ sys.stderr.write ("Processing HTML pages for %s target...\n" % t)
+ postprocess_html.process_html_files (
+ package_name = package_name,
+ package_version = package_version,
+ target = t,
+ name_filter = strip_file_name[t])
+
# We must invoke the generated $(outdir)/help2man script instead of
-# the help2man.pl source, which means that the buildscripts directory
+# the help2man.pl source, which means that the scripts/build directory
# must be built first.
#
# From the perlrun man-page:
# cases. Four more explaining what a line comment is, and that it may
# be parsed, same here.
-HELP2MAN_COMMAND = $(PERL) $(top-build-dir)/buildscripts/$(outbase)/help2man $< > $@
+HELP2MAN_COMMAND = $(buildscript-dir)/help2man $< > $@
ifeq ($(strip $(CROSS)),no)
-$(outdir)/%.1: $(outdir)/%
+$(outdir)/%.1: $(outdir)/% $(buildscript-dir)/help2man
$(HELP2MAN_COMMAND)
else
# When cross building, some manpages will not build because the
$(outdir)/%.1: out/%.1
cp $< $@
endif
+
+$(buildscript-dir)/help2man:
+ $(MAKE) -C $(depth)/scripts/build
TMP=`mktemp -d $(outdir)/pfbtemp.XXXXXXXXX` \
&& ( cd $$TMP \
&& ln -s ../mf2pt1.mem . \
- && MFINPUTS=$(top-src-dir)/mf:..:: $(PERL) $(top-src-dir)/buildscripts/mf2pt1.pl $(MF2PT1_OPTIONS) $< $(METAFONT_QUIET)) \
+ && MFINPUTS=$(top-src-dir)/mf:..:: $(buildscript-dir)/mf2pt1 $(MF2PT1_OPTIONS) $< $(METAFONT_QUIET)) \
&& mv $$TMP/*pfb $(outdir); \
rm -rf $$TMP
# $(outdir)/$(INFO_IMAGES_DIR)/*.png symlinks are only needed to view
# out-www/*.info with Emacs -- HTML docs no longer need these
# symlinks, see replace_symlinks_urls in
-# buildscripts/add_html_footer.py.
+# python/aux/postprocess_html.py.
# make dereferences symlinks, and $(INFO_IMAGES_DIR) is a symlink
# to $(outdir), so we can't use directly $(INFO_IMAGES_DIR) as a
# prerequisite, otherwise %.info are always outdated (because older
-# than $(outdir), hence this .dep file
+# than $(outdir)), hence this .dep file
$(outdir)/$(INFO_IMAGES_DIR).info-images-dir-dep: $(INFO_DOCS:%=$(outdir)/%.texi)
ifneq ($(INFO_IMAGES_DIR),)
ln -s $(outdir) $(INFO_IMAGES_DIR)
mkdir -p $(outdir)/$(INFO_IMAGES_DIR)
rm -f $(outdir)/$(INFO_IMAGES_DIR)/[a-f0-9][a-f0-9]
- cd $(outdir)/$(INFO_IMAGES_DIR) && $(PYTHON) $(top-src-dir)/buildscripts/mass-link.py symbolic .. . [a-f0-9][a-f0-9]
+ cd $(outdir)/$(INFO_IMAGES_DIR) && $(buildscript-dir)/mass-link symbolic .. . [a-f0-9][a-f0-9]
endif
touch $@
cp $< $@
$(XREF_MAPS_DIR)/%.xref-map: $(outdir)/%.texi
- $(PYTHON) $(buildscript-dir)/extract_texi_filenames.py -o $(XREF_MAPS_DIR) $<
+ $(buildscript-dir)/extract_texi_filenames -o $(XREF_MAPS_DIR) $<
$(outdir)/version.%: $(top-src-dir)/VERSION
OUTTXT_FILES += $(addprefix $(outdir)/,$(TEXI_FILES:.texi=.txt))
-GENERATE_OMF = $(PYTHON) $(buildscript-dir)/texi2omf.py --format $(1) --location $(webdir)/$(tree-dir)/out-www/$(notdir $(basename $@)) --version $(TOPLEVEL_VERSION) $< > $@
+GENERATE_OMF = $(buildscript-dir)/texi2omf --format $(1) --location $(webdir)/$(tree-dir)/out-www/$(notdir $(basename $@)) --version $(TOPLEVEL_VERSION) $< > $@
TEXINFO_PAPERSIZE_OPTION= $(if $(findstring $(PAPERSIZE),a4),,-t @afourpaper)
LILYPOND_WORDS = $(outdir)/lilypond-words $(outdir)/lilypond-words.vim
LILYPOND_WORDS_DEPENDS =\
$(top-src-dir)/lily/lily-lexer.cc \
- $(buildscript-dir)/lilypond-words.py \
+ $(buildscript-dir)/lilypond-words \
$(top-src-dir)/scm/markup.scm \
$(top-src-dir)/ly/engraver-init.ly
done
-rmdir -p $(DESTDIR)$(vimdir)
+$(buildscript-dir)/lilypond-words:
+ make -C $(depth)/scripts/build
+
$(LILYPOND_WORDS):
- cd $(top-src-dir) && $(PYTHON) buildscripts/lilypond-words.py --words --vim --dir=$(top-build-dir)/vim/$(outconfbase)
+ cd $(top-src-dir) && $(buildscript-dir)/lilypond-words --words --vim --dir=$(top-build-dir)/vim/$(outconfbase)
all: $(LILYPOND_WORDS)