2006-05-01 Han-Wen Nienhuys <hanwen@lilypond.org>
+ * *.py: more 4-space indents.
+
* autogen.sh (conf_flags): set sensible default for with-ncsb-dir
2006-05-01 Joe Neeman <joeneeman@gmail.com>
import string
if len (sys.argv) < 5:
- print 'args: LOGFILE CLASS FUNC NEW_FUNC'
-
+ print 'args: LOGFILE CLASS FUNC NEW_FUNC'
+
func = sys.argv[3]
new_func = sys.argv[4]
klazz = sys.argv[2]
files = {}
for l in log_ls:
- m = regex.search (l)
- if not m:
- continue
- print l
+ m = regex.search (l)
+ if not m:
+ continue
+ print l
- file = m.group (1)
- line_no = string.atoi (m.group (2))
- klass = m.group (3)
-
- if klass <> klazz:
- continue
+ file = m.group (1)
+ line_no = string.atoi (m.group (2))
+ klass = m.group (3)
+
+ if klass <> klazz:
+ continue
- if not files.has_key (file):
- files[file] = open (file).read ().split ('\n')
+ if not files.has_key (file):
+ files[file] = open (file).read ().split ('\n')
- line_no -= 1
- files[file][line_no] = re.sub (func, new_func, files[file][line_no])
+ line_no -= 1
+ files[file][line_no] = re.sub (func, new_func, files[file][line_no])
for (f,ls) in files.items():
- print 'writing ', f
- os.rename (f, f + '~')
- open (f, 'w').write ('\n'.join (ls))
+ print 'writing ', f
+ os.rename (f, f + '~')
+ open (f, 'w').write ('\n'.join (ls))
# usage:
def usage ():
- print 'usage: %s [-s style] [-o <outfile>] BIBFILES...';
+ print 'usage: %s [-s style] [-o <outfile>] BIBFILES...';
#print os.environ['BSTINPUTS']
output = 'bib.html'
style = 'long'
for (o,a) in options:
- if o == '-h' or o == '--help':
- usage ()
- sys.exit (0)
- elif o == '-s' or o == '--style':
- style = a
- elif o == '-o' or o == '--output':
- output = a
- else:
- raise 'unknown opt ', o
+ if o == '-h' or o == '--help':
+ usage ()
+ sys.exit (0)
+ elif o == '-s' or o == '--style':
+ style = a
+ elif o == '-o' or o == '--output':
+ output = a
+ else:
+ raise 'unknown opt ', o
if style not in ['alpha','index','long','longp','long-pario','short','short-pario','split']:
- sys.stderr.write ("Unknown style \`%s'\n" % style)
+ sys.stderr.write ("Unknown style \`%s'\n" % style)
tempfile = tempfile.mktemp ('bib2html')
if not files:
- usage ()
- sys.exit (2)
+ usage ()
+ sys.exit (2)
def strip_extension (f, ext):
- (p, e) = os.path.splitext (f)
- if e == ext:
- e = ''
- return p + e
+ (p, e) = os.path.splitext (f)
+ if e == ext:
+ e = ''
+ return p + e
nf = []
for f in files:
- nf.append (strip_extension(f, '.bib'))
+ nf.append (strip_extension(f, '.bib'))
files = string.join (nf,',')
sys.stdout.write ("Invoking `%s'\n" % cmd)
stat = os.system (cmd)
if stat <> 0:
- sys.exit(1)
+ sys.exit(1)
#TODO: do tex -> html on output
def cleanup (tempfile):
- for a in ['aux','bbl', 'blg']:
- os.unlink (tempfile + '.' + a)
+ for a in ['aux','bbl', 'blg']:
+ os.unlink (tempfile + '.' + a)
cleanup(tempfile)
# utility
def add_suffixes (target, source, env, target_suffixes, src_suffixes):
- base = os.path.splitext (str (target[0]))[0]
- return (target + map (lambda x: base + x, target_suffixes),
- source + map (lambda x: base + x, src_suffixes))
+ base = os.path.splitext (str (target[0]))[0]
+ return (target + map (lambda x: base + x, target_suffixes),
+ source + map (lambda x: base + x, src_suffixes))
# junkme; see _concat
def join_path (path, infix=os.pathsep, prefix = ''):
- def dir (x):
- if x and x[0] == '#':
- return env['srcdir'] + x[1:]
- return x
- return string.join (map (lambda x: prefix + dir (x), path), infix)
+ def dir (x):
+ if x and x[0] == '#':
+ return env['srcdir'] + x[1:]
+ return x
+ return string.join (map (lambda x: prefix + dir (x), path), infix)
def src_glob (s):
- here = os.getcwd ()
- os.chdir (env.Dir ('.').srcnode ().abspath)
- result = glob.glob (s)
- os.chdir (here)
- return result
+ here = os.getcwd ()
+ os.chdir (env.Dir ('.').srcnode ().abspath)
+ result = glob.glob (s)
+ os.chdir (here)
+ return result
Export ('src_glob')
def base_glob (s):
- return map (lambda x: os.path.splitext (x)[0], src_glob (s))
+ return map (lambda x: os.path.splitext (x)[0], src_glob (s))
Export ('base_glob')
def install (target, dir):
- dest = env['DESTDIR'] + dir
- if type (target) == type ([]):
- map (lambda x: env.Install (dir, x), target)
- else:
- env.Install (dir, target)
- env.Alias ('install', dir)
+ dest = env['DESTDIR'] + dir
+ if type (target) == type ([]):
+ map (lambda x: env.Install (dir, x), target)
+ else:
+ env.Install (dir, target)
+ env.Alias ('install', dir)
Export ('install')
def _fixme (s):
- x = string.replace (s, '#', env['srcdir'])
- x = string.replace (x, '@', env['absbuild'])
- return x
+ x = string.replace (s, '#', env['srcdir'])
+ x = string.replace (x, '@', env['absbuild'])
+ return x
# Clean separation between generic action + flags and actual
# configuration and flags in environment for this build.
HH = Builder (action = 'bison -d -o ${TARGET.base}.cc $SOURCE',
- suffix = '.hh', src_suffix = '.yy')
+ suffix = '.hh', src_suffix = '.yy')
env.Append (BUILDERS = {'HH' : HH})
# some of these commands in the ENVironment.
env.Append (
- _fixme = _fixme,
- ##ABC2LY = 'abc2ly',
- ##LILYPOND = 'lilypond',
- LILYOND_BOOK = 'lilypond-book',
-
- #ugr
- #LILYPOND_BOOK_FORMAT = 'texi',
- LILYPOND_BOOK_FORMAT = '',
- #LILYPOND_BOOK_FLAGS = ['--format=$LILYPOND_BOOK_FORMAT'],
- LILYPOND_BOOK_FLAGS = '''--format=$LILYPOND_BOOK_FORMAT --process="lilypond --backend=eps --formats=ps,png --header=texidoc -I$srcdir/input/test -e '(ly:set-option (quote internal-type-checking) #t)'" ''',
-
- LILYPOND_PATH = [],
- # The SCons way around FOO_PATH:
- ##LILYPOND_INCFLAGS = '$( ${_concat(INCPREFIX, LILYPOND_PATH, INCSUFFIX, __env__, RDirs)} $)',
- LILYPOND_INCFLAGS = '$( ${_concat(INCPREFIX, LILYPOND_PATH, INCSUFFIX, __env__)} $)',
-
- MAKEINFO_PATH = [],
- MAKEINFO_FLAGS = [],
- MAKEINFO_INCFLAGS = '$( ${_concat(INCPREFIX, MAKEINFO_PATH, INCSUFFIX, __env__, RDirs)} $)',
- # should not be necessary
- # PYTHONPATH = ['$absbuild/python/$out'],
- TEXI2DVI_FLAGS = [],
- _TEXI2DVI_FLAGS = '$( ${_concat(" ", TEXI2DVI_FLAGS,)} $)',
- )
+ _fixme = _fixme,
+ ##ABC2LY = 'abc2ly',
+ ##LILYPOND = 'lilypond',
+ LILYOND_BOOK = 'lilypond-book',
+
+ #ugr
+ #LILYPOND_BOOK_FORMAT = 'texi',
+ LILYPOND_BOOK_FORMAT = '',
+ #LILYPOND_BOOK_FLAGS = ['--format=$LILYPOND_BOOK_FORMAT'],
+ LILYPOND_BOOK_FLAGS = '''--format=$LILYPOND_BOOK_FORMAT --process="lilypond --backend=eps --formats=ps,png --header=texidoc -I$srcdir/input/test -e '(ly:set-option (quote internal-type-checking) #t)'" ''',
+
+ LILYPOND_PATH = [],
+ # The SCons way around FOO_PATH:
+ ##LILYPOND_INCFLAGS = '$( ${_concat(INCPREFIX, LILYPOND_PATH, INCSUFFIX, __env__, RDirs)} $)',
+ LILYPOND_INCFLAGS = '$( ${_concat(INCPREFIX, LILYPOND_PATH, INCSUFFIX, __env__)} $)',
+
+ MAKEINFO_PATH = [],
+ MAKEINFO_FLAGS = [],
+ MAKEINFO_INCFLAGS = '$( ${_concat(INCPREFIX, MAKEINFO_PATH, INCSUFFIX, __env__, RDirs)} $)',
+ # should not be necessary
+ # PYTHONPATH = ['$absbuild/python/$out'],
+ TEXI2DVI_FLAGS = [],
+ _TEXI2DVI_FLAGS = '$( ${_concat(" ", TEXI2DVI_FLAGS,)} $)',
+ )
TXT =\
- Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCFLAGS\
- --no-split --no-headers $SOURCE',
- suffix = '.txt', src_suffix = '.texi')
+ Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCFLAGS\
+ --no-split --no-headers $SOURCE',
+ suffix = '.txt', src_suffix = '.texi')
env.Append (BUILDERS = {'TXT': TXT})
INFO =\
- Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCFLAGS $SOURCE',
- suffix = '.info', src_suffix = '.texi')
+ Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCFLAGS $SOURCE',
+ suffix = '.info', src_suffix = '.texi')
env.Append (BUILDERS = {'INFO': INFO})
HTML =\
- Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCLUDES\
- --html --no-split --no-headers $MAKEINFO_FLAGS $SOURCE',
+ Builder (action = '$MAKEINFO --output=$TARGET $MAKEINFO_INCLUDES\
+ --html --no-split --no-headers $MAKEINFO_FLAGS $SOURCE',
suffix = '.html', src_suffix = '.texi')
env.Append (BUILDERS = {'HTML': HTML})
TEXI =\
- Builder (action =
- '$LILYPOND_BOOK --output=${TARGET.dir} \
- --include=${TARGET.dir} $LILYPOND_INCFLAGS \
- --process="$LILYPOND $LILYPOND_INCFLAGS" \
- $LILYPOND_BOOK_FLAGS \
- $SOURCE',
- suffix = '.texi', src_suffix = '.tely')
+ Builder (action =
+ '$LILYPOND_BOOK --output=${TARGET.dir} \
+ --include=${TARGET.dir} $LILYPOND_INCFLAGS \
+ --process="$LILYPOND $LILYPOND_INCFLAGS" \
+ $LILYPOND_BOOK_FLAGS \
+ $SOURCE',
+ suffix = '.texi', src_suffix = '.tely')
env.Append (BUILDERS = {'TEXI': TEXI})
TEXIDVI =\
- Builder (action = 'cd ${TARGET.dir} && \
- texi2dvi --batch $_TEXI2DVI_FLAGS ${SOURCE.file}',
- suffix = '.dvi', src_suffix = '.texi')
+ Builder (action = 'cd ${TARGET.dir} && \
+ texi2dvi --batch $_TEXI2DVI_FLAGS ${SOURCE.file}',
+ suffix = '.dvi', src_suffix = '.texi')
env.Append (BUILDERS = {'TEXIDVI': TEXIDVI})
DVIPS =\
- Builder (action = 'TEXINPUTS=${TARGET.dir}:$$TEXINPUTS $DVIPS -o $TARGET $DVIPS_FLAGS $SOURCE',
- suffix = '.ps', src_suffix = '.dvi')
+ Builder (action = 'TEXINPUTS=${TARGET.dir}:$$TEXINPUTS $DVIPS -o $TARGET $DVIPS_FLAGS $SOURCE',
+ suffix = '.ps', src_suffix = '.dvi')
env.Append (BUILDERS = {'DVIPS': DVIPS})
DVIPDF =\
- Builder (action = 'TEXINPUTS=${TARGET.dir}:$$TEXINPUTS $DVIPS -o $TARGET -Ppdf $DVIPS_FLAGS $SOURCE',
- suffix = '.pdfps', src_suffix = '.dvi')
+ Builder (action = 'TEXINPUTS=${TARGET.dir}:$$TEXINPUTS $DVIPS -o $TARGET -Ppdf $DVIPS_FLAGS $SOURCE',
+ suffix = '.pdfps', src_suffix = '.dvi')
env.Append (BUILDERS = {'DVIPDF': DVIPDF})
PSPDF =\
- Builder (action = 'ps2pdf $PSPDF_FLAGS $SOURCE $TARGET',
- suffix = '.pdf', src_suffix = '.pdfps')
+ Builder (action = 'ps2pdf $PSPDF_FLAGS $SOURCE $TARGET',
+ suffix = '.pdf', src_suffix = '.pdfps')
env.Append (BUILDERS = {'PSPDF': PSPDF})
PNG2EPS =\
- Builder (action = 'convert $SOURCE $TARGET',
- suffix = '.eps', src_suffix = '.png')
+ Builder (action = 'convert $SOURCE $TARGET',
+ suffix = '.eps', src_suffix = '.png')
env.Append (BUILDERS = {'PNG2EPS': PNG2EPS})
env.Append (
- #urg
- BSTINPUTS = '${SOURCE.dir}:${TARGET.dir}:',
- BIB2HTML = '$PYTHON $srcdir/buildscripts/bib2html.py',
+ #urg
+ BSTINPUTS = '${SOURCE.dir}:${TARGET.dir}:',
+ BIB2HTML = '$PYTHON $srcdir/buildscripts/bib2html.py',
)
def add_ps_target (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.ps'], source)
+ base = os.path.splitext (str (target[0]))[0]
+ return (target + [base + '.ps'], source)
lilypond =\
- Builder (action = '$LILYPOND --output=${TARGET.base} --include=${TARGET.dir} $SOURCE',
- suffix = '.pdf', src_suffix = '.ly')
-## emitter = add_ps_target)
+ Builder (action = '$LILYPOND --output=${TARGET.base} --include=${TARGET.dir} $SOURCE',
+ suffix = '.pdf', src_suffix = '.ly')
+## emitter = add_ps_target)
env.Append (BUILDERS = {'LilyPond': lilypond})
ABC = Builder (action = '$ABC2LY --output=${TARGET} --strict $SOURCE',
- suffix = '.ly', src_suffix = '.abc')
+ suffix = '.ly', src_suffix = '.abc')
env.Append (BUILDERS = {'ABC': ABC})
def add_log_target (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.log'], source)
+ base = os.path.splitext (str (target[0]))[0]
+ return (target + [base + '.log'], source)
def add_tfm_target (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.tfm'], source)
+ base = os.path.splitext (str (target[0]))[0]
+ return (target + [base + '.tfm'], source)
def add_lisp_enc_tex_ly_target (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.lisp', base + '.enc', base + '.tex',
- base + 'list.ly'],
- source)
+ base = os.path.splitext (str (target[0]))[0]
+ return (target + [base + '.lisp', base + '.enc', base + '.tex',
+ base + 'list.ly'],
+ source)
def add_cff_cffps_svg (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target + [base + '.cff', base + '.cff.ps', base + '.svg'],
- source)
+ base = os.path.splitext (str (target[0]))[0]
+ return (target + [base + '.cff', base + '.cff.ps', base + '.svg'],
+ source)
a = 'cd ${TARGET.dir} \
&& MFINPUTS=.:${SOURCE.dir}:$srcdir/${SOURCE.dir}: \
$MF "\\mode:=$MFMODE; nonstopmode; input ${SOURCE.filebase};" \
| grep -v "@\|>>\|w:\|h:";'
tfm = Builder (action = a, suffix = '.tfm', src_suffix = '.mf',
-# emitter = lambda t, s, e: add_suffixes (t, s, e, ['.log'], []))
- emitter = add_log_target)
+# emitter = lambda t, s, e: add_suffixes (t, s, e, ['.log'], []))
+ emitter = add_log_target)
env.Append (BUILDERS = {'TFM': tfm})
a = '$PYTHON $MF_TO_TABLE_PY \
--ly=${TARGET.base}list.ly \
${TARGET.base}.log'
gtable = Builder (action = a, suffix = '.otf-gtable', src_suffix = '.log',
- emitter = add_lisp_enc_tex_ly_target)
+ emitter = add_lisp_enc_tex_ly_target)
env.Append (BUILDERS = {'GTABLE': gtable})
def add_enc_src (target, source, env):
- base = os.path.splitext (str (target[0]))[0]
- return (target, source + [base + '.enc'])
+ base = os.path.splitext (str (target[0]))[0]
+ return (target, source + [base + '.enc'])
# FIXME UGH, should fix --output option for mftrace
a = 'cd ${TARGET.dir} && \
${SOURCE.file}'
pfa = Builder (action = a,
- suffix = '.pfa',
- src_suffix = '.mf',
- emitter = add_enc_src)
+ suffix = '.pfa',
+ src_suffix = '.mf',
+ emitter = add_enc_src)
env.Append (BUILDERS = {'PFA': pfa})
a = ['(cd ${TARGET.dir} && $FONTFORGE -script ${SOURCE.file})',
# '$PYTHON $srcdir/buildscripts/ps-embed-cff.py ${SOURCE.base}.cff $$(cat ${SOURCE.base}.fontname) ${SOURCE.base}.cff.ps',
- 'rm -f ${TARGET.dir}/*.scale.pfa']
+ 'rm -f ${TARGET.dir}/*.scale.pfa']
otf = Builder (action = a,
- suffix = '.otf',
- src_suffix = '.pe',
-# emitter = add_cff_cffps_svg
- )
+ suffix = '.otf',
+ src_suffix = '.pe',
+# emitter = add_cff_cffps_svg
+ )
env.Append (BUILDERS = {'OTF': otf})
# naming
def at_copy (target, source, env):
- n = str (source[0])
- s = open (n).read ()
- for i in atvars:
- if env.has_key (i):
- s = string.replace (s, '@%s@'% i, env[i])
- t = str (target[0])
- open (t, 'w').write (s)
- # wugh
- if os.path.basename (os.path.dirname (str (target[0]))) == 'bin':
- os.chmod (t, 0755)
+ n = str (source[0])
+ s = open (n).read ()
+ for i in atvars:
+ if env.has_key (i):
+ s = string.replace (s, '@%s@'% i, env[i])
+ t = str (target[0])
+ open (t, 'w').write (s)
+ # wugh
+ if os.path.basename (os.path.dirname (str (target[0]))) == 'bin':
+ os.chmod (t, 0755)
AT_COPY = Builder (action = at_copy, src_suffix = ['.in', '.py', '.sh',])
env.Append (BUILDERS = {'AT_COPY': AT_COPY})
# naming
def at_copy_ext (target, source, env):
- n = str (source[0])
- s = open (n).read ()
- for i in atvars:
- if env.has_key (i):
- s = string.replace (s, '@%s@'% i, env[i])
- # whugh
- e = os.path.splitext (n)[1]
- t = str (target[0]) + e
- open (t, 'w').write (s)
+ n = str (source[0])
+ s = open (n).read ()
+ for i in atvars:
+ if env.has_key (i):
+ s = string.replace (s, '@%s@'% i, env[i])
+ # whugh
+ e = os.path.splitext (n)[1]
+ t = str (target[0]) + e
+ open (t, 'w').write (s)
AT_COPY_EXT = Builder (action = at_copy_ext, src_suffix = ['.py', '.sh',])
env.Append (BUILDERS = {'AT_COPY_EXT': AT_COPY_EXT})
MO = Builder (action = 'msgfmt -o $TARGET $SOURCE',
- suffix = '.mo', src_suffix = '.po')
+ suffix = '.mo', src_suffix = '.po')
env.Append (BUILDERS = {'MO': MO})
# ' '; ?
--output-dir=${TARGET.dir} --add-comments \
--keyword=_ --keyword=_f --keyword=_i $SOURCES'
PO = Builder (action = a, suffix = '.pot',
- src_suffix = ['.cc', '.hh', '.py'], multi = 1)
+ src_suffix = ['.cc', '.hh', '.py'], multi = 1)
env['potarget'] = os.path.join (env['absbuild'], 'po', env['out'],
- 'lilypond.pot')
+ 'lilypond.pot')
env['pocommand'] = a
ugh = '; mv ${TARGET} ${SOURCE}'
def mutopia (ly = None, abc = None):
- # FIXME: ugr, huh? The values from ../SConstruct get appended
- # to the predefined values from this builder context:
-
- # abc2ly/usr/bin/python ..../abc2.py
-
- # Override them again to fix web build...
-
-
- #BUILD_ABC2LY = '${set__x}$PYTHON $srcdir/scripts/abc2ly.py'
- #BUILD_LILYPOND = '$absbuild/$out/lilypond ${__verbose}'
- e = env.Copy (
- #LILYPOND = BUILD_LILYPOND,
- #ABC2LY = BUILD_ABC2LY,
- )
-
- if not abc:
- abc = base_glob ('*.abc')
- if not ly:
- ly = base_glob ('*.ly') + map (e.ABC, abc)
- pdf = map (e.LilyPond, ly)
-
- # We need lily and mf to build these.
- env.Depends (pdf, ['#/lily', '#/mf'])
- env.Alias ('doc', pdf)
+ # FIXME: ugr, huh? The values from ../SConstruct get appended
+ # to the predefined values from this builder context:
+
+ # abc2ly/usr/bin/python ..../abc2.py
+
+ # Override them again to fix web build...
+
+
+ #BUILD_ABC2LY = '${set__x}$PYTHON $srcdir/scripts/abc2ly.py'
+ #BUILD_LILYPOND = '$absbuild/$out/lilypond ${__verbose}'
+ e = env.Copy (
+ #LILYPOND = BUILD_LILYPOND,
+ #ABC2LY = BUILD_ABC2LY,
+ )
+
+ if not abc:
+ abc = base_glob ('*.abc')
+ if not ly:
+ ly = base_glob ('*.ly') + map (e.ABC, abc)
+ pdf = map (e.LilyPond, ly)
+
+ # We need lily and mf to build these.
+ env.Depends (pdf, ['#/lily', '#/mf'])
+ env.Alias ('doc', pdf)
Export ('mutopia')
def collate (title = 'collated files'):
- ly = base_glob ('*.ly')
-
- e = env.Copy (
- TITLE = title,
- LILYPOND_BOOK_FLAGS = '''--process="lilypond --backend=eps --formats=ps,png --header=texidoc -I$srcdir/input/test -e '(ly:set-option (quote internal-type-checking) #t)'" ''',
- __verbose = ' --verbose',
- )
- #
- tely = e.LYS2TELY ('collated-files', ly)
- texi = e.TEXI (tely)
- # We need lily and mf to build these.
- env.Depends (texi, ['#/lily', '#/mf'])
- dvi = e.TEXIDVI (texi)
- pspdf = e.DVIPDF (dvi)
- pdf = e.PSPDF (pspdf)
- html = e.HTML (texi)
-
- env.Alias ('doc', pdf)
- env.Alias ('doc', html)
+ ly = base_glob ('*.ly')
+
+ e = env.Copy (
+ TITLE = title,
+ LILYPOND_BOOK_FLAGS = '''--process="lilypond --backend=eps --formats=ps,png --header=texidoc -I$srcdir/input/test -e '(ly:set-option (quote internal-type-checking) #t)'" ''',
+ __verbose = ' --verbose',
+ )
+ #
+ tely = e.LYS2TELY ('collated-files', ly)
+ texi = e.TEXI (tely)
+ # We need lily and mf to build these.
+ env.Depends (texi, ['#/lily', '#/mf'])
+ dvi = e.TEXIDVI (texi)
+ pspdf = e.DVIPDF (dvi)
+ pdf = e.PSPDF (pspdf)
+ html = e.HTML (texi)
+
+ env.Alias ('doc', pdf)
+ env.Alias ('doc', html)
Export ('collate')
indent_p = 0
rules = {
- GLOBAL_CXX:
- [
- # delete gratuitous block
- ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
- '\n\\2;'),
- ],
- CXX:
- [
- # space before parenthesis open
- ('([^\( \]])[ \t]*\(', '\\1 ('),
- # space after comma
- ("\([^'],\)[ \t]*", '\1 '),
- # delete gratuitous block
- ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
- '\n\\2;'),
- # delete inline tabs
- ('(\w)\t+', '\\1 '),
- # delete inline double spaces
- (' *', ' '),
- # delete space after parenthesis open
- ('\([ \t]*', '('),
- # delete space before parenthesis close
- ('[ \t]*\)', ')'),
- # delete spaces after prefix
- ('(--|\+\+)[ \t]*([\w\)])', '\\1\\2'),
- # delete spaces before postfix
- ('([\w\)\]])[ \t]*(--|\+\+)', '\\1\\2'),
- # delete space after parenthesis close
- #('\)[ \t]*([^\w])', ')\\1'),
- # delete space around operator
- # ('([\w\(\)\]])([ \t]*)(::|\.)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
- ('([\w\(\)\]])([ \t]*)(\.|->)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
- # delete space after operator
- ('(::)([ \t]*)([\w\(\)])', '\\1\\3'),
- # delete superflous space around operator
- ('([\w\(\)\]])([ \t]+)(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&|\||\*)([ \t]+)([\w\(\)])', '\\1 \\3 \\5'),
- # space around operator1
- ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
- # space around operator2
- ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) ([^\w\s])', '\\1 \\2 \\3'),
- # space around operator3
- ('([^\w\s]) (&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|[^-]>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
- # space around operator4
- ('([\w\(\)\]]) (\*|/|\+|-) *([-:])', '\\1 \\2 \\3'),
- # space around +/-; exponent
- ('([\w\)\]])(\+|-)([_A-Za-z\(])', '\\1 \\2 \\3'),
- ('([_\dA-Za-df-z\)\]])(\+|-)([\w\(])', '\\1 \\2 \\3'),
- # trailing operator
- (' (::|&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&XXX|\||\*XXX)[ \t]*\n([ \t]*)', '\n\\2\\1 '),
- # pointer
- ##('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
- ('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|:|=|\?\)|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
- #to#('(bool|char|const|delete|int|stream|unsigned|void|([A-Z]\w*)|[,])[ \n\t]*(\*|&)[ \t]*', '\\1 \\3'),
- # pointer with template
- ('(( *((bool|char|const|delete|int|stream|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*|\w+::\w+|[,])[ \*&],*)+)>) *(\*|&) *', '\\1 \\5'),
- #to#('(( *((bool|char|delete|int|stream|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)|[,])[ \*&],*)+)>)[ \t\n]*(\*|&) *', '\\1 \\7'),
- # unary pointer, minus, not
- ('(return|=) (\*|&|-|!) ([\w\(])', '\\1 \\2\\3'),
- # space after `operator'
- ('(\Woperator) *([^\w\s])', '\\1 \\2'),
- # dangling brace close
- ('\n[ \t]*(\n[ \t]*})', '\\1'),
- # dangling newline
- ('\n[ \t]*\n[ \t]*\n', '\n\n'),
- # dangling parenthesis open
- #('[ \t]*\n[ \t]*\([ \t]*\n', '('),
- ('\([ \t]*\n', '('),
- # dangling parenthesis close
- ('\n[ \t]*\)', ')'),
- # dangling comma
- ('\n[ \t]*,', ','),
- # dangling semicolon
- ('\n[ \t]*;', ';'),
- # brace open
- ('(\w)[ \t]*([^\s]*){([ \t]*\n)', '\\1\\2\n{\n'),
- # brace open backslash
- ('(\w[^\n]*){[ \t]*\\\\\n', '\\1\\\n{\\\n'),
- # brace close
- ("}[ \t]*([^'\n]*\w[^\n\\\]*)\n", '}\n\\1\n'),
- # brace close backslash
- ("}[ \t]*([^'\n]*\w[^\n\\\]*)", '\n}\n\\1'),
- # delete space after `operator'
- #('(\Woperator) (\W)', '\\1\\2'),
- # delete space after case, label
- ('(\W(case|label) ([\w]+)) :', '\\1:'),
- # delete space before comma
- ('[ \t]*,', ','),
- # delete space before semicolon
- ('[ \t]*;', ';'),
- # delete space before eol-backslash
- ('[ \t]*\\\\\n', '\\\n'),
- # delete trailing whitespace
- ('[ \t]*\n', '\n'),
-
- ## Deuglify code that also gets ugly by rules above.
- # delete newline after typedef struct
- ('(typedef struct\s+([\w]*\s){([^}]|{[^}]*})*})\s*\n\s*(\w[\w\d]*;)', '\\1 \\4'),
- # delete spaces around template brackets
- #('(dynamic_cast|template|([A-Z]\w*))[ \t]*<[ \t]*(( *(bool|char|int|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)),?)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\3\\8>'),
- ('(dynamic_cast|template|typedef|\w+::\w+|[A-Z]\w*)[ \t]*<[ \t]*(( *(bool|char|const|int|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*)( *[\*&]?,|[\*&])*)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\2\\6>'),
- ('(\w+::\w+|[A-Z]\w*) < ((\w+::\w+|[A-Z]\w*)<[A-Z]\w*>) >', '\\1<\\2 >'),
- ('((if|while)\s+\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\n;'),
- ('(for\s+\(([^;]*;[^;]*;([^\)]|\([^\)]*\))*)\))\s*;', '\\1\n;'),
- # do {..} while
- ('(}\s*while\s*)(\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\\2;'),
-
- ## Fix code that gets broken by rules above.
- ##('->\s+\*', '->*'),
- # delete space before #define x()
- ('#[ \t]*define (\w*)[ \t]*\(', '#define \\1('),
- # add space in #define x ()
- ('#[ \t]*define (\w*)(\(([^\(\)]|\([^\(\)]*\))*\)\\n)',
- '#define \\1 \\2'),
- # delete space in #include <>
- ('#[ \t]*include[ \t]*<[ \t]*([^ \t>]*)[ \t]*(/?)[ \t]*([^ \t>]*)[ \t]*>',
- '#include <\\1\\2\\3>'),
- # delete backslash before empty line (emacs' indent region is broken)
- ('\\\\\n\n', '\n\n'),
- ],
-
- COMMENT:
- [
- # delete trailing whitespace
- ('[ \t]*\n', '\n'),
- # delete empty first lines
- ('(/\*\n)\n*', '\\1'),
- # delete empty last lines
- ('\n*(\n\*/)', '\\1'),
- ## delete newline after start?
- #('/(\*)\n', '\\1'),
- ## delete newline before end?
- #('\n(\*/)', '\\1'),
- ],
- }
+ GLOBAL_CXX:
+ [
+ # delete gratuitous block
+ ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
+ '\n\\2;'),
+ ],
+ CXX:
+ [
+ # space before parenthesis open
+ ('([^\( \]])[ \t]*\(', '\\1 ('),
+ # space after comma
+ ("\([^'],\)[ \t]*", '\1 '),
+ # delete gratuitous block
+ ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
+ '\n\\2;'),
+ # delete inline tabs
+ ('(\w)\t+', '\\1 '),
+ # delete inline double spaces
+ (' *', ' '),
+ # delete space after parenthesis open
+ ('\([ \t]*', '('),
+ # delete space before parenthesis close
+ ('[ \t]*\)', ')'),
+ # delete spaces after prefix
+ ('(--|\+\+)[ \t]*([\w\)])', '\\1\\2'),
+ # delete spaces before postfix
+ ('([\w\)\]])[ \t]*(--|\+\+)', '\\1\\2'),
+ # delete space after parenthesis close
+ #('\)[ \t]*([^\w])', ')\\1'),
+ # delete space around operator
+ # ('([\w\(\)\]])([ \t]*)(::|\.)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
+ ('([\w\(\)\]])([ \t]*)(\.|->)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
+ # delete space after operator
+ ('(::)([ \t]*)([\w\(\)])', '\\1\\3'),
+ # delete superflous space around operator
+ ('([\w\(\)\]])([ \t]+)(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&|\||\*)([ \t]+)([\w\(\)])', '\\1 \\3 \\5'),
+ # space around operator1
+ ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
+ # space around operator2
+ ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) ([^\w\s])', '\\1 \\2 \\3'),
+ # space around operator3
+ ('([^\w\s]) (&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|[^-]>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
+ # space around operator4
+ ('([\w\(\)\]]) (\*|/|\+|-) *([-:])', '\\1 \\2 \\3'),
+ # space around +/-; exponent
+ ('([\w\)\]])(\+|-)([_A-Za-z\(])', '\\1 \\2 \\3'),
+ ('([_\dA-Za-df-z\)\]])(\+|-)([\w\(])', '\\1 \\2 \\3'),
+ # trailing operator
+ (' (::|&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&XXX|\||\*XXX)[ \t]*\n([ \t]*)', '\n\\2\\1 '),
+ # pointer
+ ##('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
+ ('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|:|=|\?\)|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
+ #to#('(bool|char|const|delete|int|stream|unsigned|void|([A-Z]\w*)|[,])[ \n\t]*(\*|&)[ \t]*', '\\1 \\3'),
+ # pointer with template
+ ('(( *((bool|char|const|delete|int|stream|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*|\w+::\w+|[,])[ \*&],*)+)>) *(\*|&) *', '\\1 \\5'),
+ #to#('(( *((bool|char|delete|int|stream|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)|[,])[ \*&],*)+)>)[ \t\n]*(\*|&) *', '\\1 \\7'),
+ # unary pointer, minus, not
+ ('(return|=) (\*|&|-|!) ([\w\(])', '\\1 \\2\\3'),
+ # space after `operator'
+ ('(\Woperator) *([^\w\s])', '\\1 \\2'),
+ # dangling brace close
+ ('\n[ \t]*(\n[ \t]*})', '\\1'),
+ # dangling newline
+ ('\n[ \t]*\n[ \t]*\n', '\n\n'),
+ # dangling parenthesis open
+ #('[ \t]*\n[ \t]*\([ \t]*\n', '('),
+ ('\([ \t]*\n', '('),
+ # dangling parenthesis close
+ ('\n[ \t]*\)', ')'),
+ # dangling comma
+ ('\n[ \t]*,', ','),
+ # dangling semicolon
+ ('\n[ \t]*;', ';'),
+ # brace open
+ ('(\w)[ \t]*([^\s]*){([ \t]*\n)', '\\1\\2\n{\n'),
+ # brace open backslash
+ ('(\w[^\n]*){[ \t]*\\\\\n', '\\1\\\n{\\\n'),
+ # brace close
+ ("}[ \t]*([^'\n]*\w[^\n\\\]*)\n", '}\n\\1\n'),
+ # brace close backslash
+ ("}[ \t]*([^'\n]*\w[^\n\\\]*)", '\n}\n\\1'),
+ # delete space after `operator'
+ #('(\Woperator) (\W)', '\\1\\2'),
+ # delete space after case, label
+ ('(\W(case|label) ([\w]+)) :', '\\1:'),
+ # delete space before comma
+ ('[ \t]*,', ','),
+ # delete space before semicolon
+ ('[ \t]*;', ';'),
+ # delete space before eol-backslash
+ ('[ \t]*\\\\\n', '\\\n'),
+ # delete trailing whitespace
+ ('[ \t]*\n', '\n'),
+
+ ## Deuglify code that also gets ugly by rules above.
+ # delete newline after typedef struct
+ ('(typedef struct\s+([\w]*\s){([^}]|{[^}]*})*})\s*\n\s*(\w[\w\d]*;)', '\\1 \\4'),
+ # delete spaces around template brackets
+ #('(dynamic_cast|template|([A-Z]\w*))[ \t]*<[ \t]*(( *(bool|char|int|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)),?)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\3\\8>'),
+ ('(dynamic_cast|template|typedef|\w+::\w+|[A-Z]\w*)[ \t]*<[ \t]*(( *(bool|char|const|int|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*)( *[\*&]?,|[\*&])*)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\2\\6>'),
+ ('(\w+::\w+|[A-Z]\w*) < ((\w+::\w+|[A-Z]\w*)<[A-Z]\w*>) >', '\\1<\\2 >'),
+ ('((if|while)\s+\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\n;'),
+ ('(for\s+\(([^;]*;[^;]*;([^\)]|\([^\)]*\))*)\))\s*;', '\\1\n;'),
+ # do {..} while
+ ('(}\s*while\s*)(\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\\2;'),
+
+ ## Fix code that gets broken by rules above.
+ ##('->\s+\*', '->*'),
+ # delete space before #define x()
+ ('#[ \t]*define (\w*)[ \t]*\(', '#define \\1('),
+ # add space in #define x ()
+ ('#[ \t]*define (\w*)(\(([^\(\)]|\([^\(\)]*\))*\)\\n)',
+ '#define \\1 \\2'),
+ # delete space in #include <>
+ ('#[ \t]*include[ \t]*<[ \t]*([^ \t>]*)[ \t]*(/?)[ \t]*([^ \t>]*)[ \t]*>',
+ '#include <\\1\\2\\3>'),
+ # delete backslash before empty line (emacs' indent region is broken)
+ ('\\\\\n\n', '\n\n'),
+ ],
+
+ COMMENT:
+ [
+ # delete trailing whitespace
+ ('[ \t]*\n', '\n'),
+ # delete empty first lines
+ ('(/\*\n)\n*', '\\1'),
+ # delete empty last lines
+ ('\n*(\n\*/)', '\\1'),
+ ## delete newline after start?
+ #('/(\*)\n', '\\1'),
+ ## delete newline before end?
+ #('\n(\*/)', '\\1'),
+ ],
+ }
# Recognize special sequences in the input.
#
# (?x) -- Ignore whitespace in patterns.
no_match = 'a\ba'
snippet_res = {
- CXX: {
- 'multiline_comment':
- r'''(?sx)
- (?P<match>
- (?P<code>
- [ \t]*/\*.*?\*/))''',
-
- 'singleline_comment':
- r'''(?mx)
- ^.*
- (?P<match>
- (?P<code>
- [ \t]*//([ \t][^\n]*|)\n))''',
-
- 'string':
- r'''(?x)
- (?P<match>
- (?P<code>
- "([^\"\n](\")*)*"))''',
-
- 'char':
- r'''(?x)
- (?P<match>
- (?P<code>
- '([^']+|\')))''',
-
- 'include':
- r'''(?x)
- (?P<match>
- (?P<code>
- "#[ \t]*include[ \t]*<[^>]*>''',
- },
- }
+ CXX: {
+ 'multiline_comment':
+ r'''(?sx)
+ (?P<match>
+ (?P<code>
+ [ \t]*/\*.*?\*/))''',
+
+ 'singleline_comment':
+ r'''(?mx)
+ ^.*
+ (?P<match>
+ (?P<code>
+ [ \t]*//([ \t][^\n]*|)\n))''',
+
+ 'string':
+ r'''(?x)
+ (?P<match>
+ (?P<code>
+ "([^\"\n](\")*)*"))''',
+
+ 'char':
+ r'''(?x)
+ (?P<match>
+ (?P<code>
+ '([^']+|\')))''',
+
+ 'include':
+ r'''(?x)
+ (?P<match>
+ (?P<code>
+ "#[ \t]*include[ \t]*<[^>]*>''',
+ },
+ }
class Chunk:
- def replacement_text (self):
- return ''
+ def replacement_text (self):
+ return ''
- def filter_text (self):
- return self.replacement_text ()
+ def filter_text (self):
+ return self.replacement_text ()
class Substring (Chunk):
- def __init__ (self, source, start, end):
- self.source = source
- self.start = start
- self.end = end
-
- def replacement_text (self):
- s = self.source[self.start:self.end]
- if verbose_p:
- sys.stderr.write ('CXX Rules')
- for i in rules[CXX]:
- if verbose_p:
- sys.stderr.write ('.')
- #sys.stderr.write ('\n\n***********\n')
- #sys.stderr.write (i[0])
- #sys.stderr.write ('\n***********\n')
- #sys.stderr.write ('\n=========>>\n')
- #sys.stderr.write (s)
- #sys.stderr.write ('\n<<=========\n')
- s = re.sub (i[0], i[1], s)
- if verbose_p:
- sys.stderr.write ('done\n')
- return s
-
+ def __init__ (self, source, start, end):
+ self.source = source
+ self.start = start
+ self.end = end
+
+ def replacement_text (self):
+ s = self.source[self.start:self.end]
+ if verbose_p:
+ sys.stderr.write ('CXX Rules')
+ for i in rules[CXX]:
+ if verbose_p:
+ sys.stderr.write ('.')
+ #sys.stderr.write ('\n\n***********\n')
+ #sys.stderr.write (i[0])
+ #sys.stderr.write ('\n***********\n')
+ #sys.stderr.write ('\n=========>>\n')
+ #sys.stderr.write (s)
+ #sys.stderr.write ('\n<<=========\n')
+ s = re.sub (i[0], i[1], s)
+ if verbose_p:
+ sys.stderr.write ('done\n')
+ return s
+
class Snippet (Chunk):
- def __init__ (self, type, match, format):
- self.type = type
- self.match = match
- self.hash = 0
- self.options = []
- self.format = format
+ def __init__ (self, type, match, format):
+ self.type = type
+ self.match = match
+ self.hash = 0
+ self.options = []
+ self.format = format
- def replacement_text (self):
- return self.match.group ('match')
+ def replacement_text (self):
+ return self.match.group ('match')
- def substring (self, s):
- return self.match.group (s)
+ def substring (self, s):
+ return self.match.group (s)
- def __repr__ (self):
- return `self.__class__` + ' type = ' + self.type
+ def __repr__ (self):
+ return `self.__class__` + ' type = ' + self.type
class Multiline_comment (Snippet):
- def __init__ (self, source, match, format):
- self.type = type
- self.match = match
- self.hash = 0
- self.options = []
- self.format = format
-
- def replacement_text (self):
- s = self.match.group ('match')
- if verbose_p:
- sys.stderr.write ('COMMENT Rules')
- for i in rules[COMMENT]:
- if verbose_p:
- sys.stderr.write ('.')
- s = re.sub (i[0], i[1], s)
- return s
+ def __init__ (self, source, match, format):
+ self.type = type
+ self.match = match
+ self.hash = 0
+ self.options = []
+ self.format = format
+
+ def replacement_text (self):
+ s = self.match.group ('match')
+ if verbose_p:
+ sys.stderr.write ('COMMENT Rules')
+ for i in rules[COMMENT]:
+ if verbose_p:
+ sys.stderr.write ('.')
+ s = re.sub (i[0], i[1], s)
+ return s
snippet_type_to_class = {
- 'multiline_comment': Multiline_comment,
-# 'string': Multiline_comment,
-# 'include': Include_snippet,
+ 'multiline_comment': Multiline_comment,
+# 'string': Multiline_comment,
+# 'include': Include_snippet,
}
def find_toplevel_snippets (s, types):
- if verbose_p:
- sys.stderr.write ('Dissecting')
-
- res = {}
- for i in types:
- res[i] = re.compile (snippet_res[format][i])
-
- snippets = []
- index = 0
- ## found = dict (map (lambda x: (x, None),
- ## types))
- ## urg python2.1
- found = {}
- map (lambda x, f = found: f.setdefault (x, None),
- types)
-
- # We want to search for multiple regexes, without searching
- # the string multiple times for one regex.
- # Hence, we use earlier results to limit the string portion
- # where we search.
- # Since every part of the string is traversed at most once for
- # every type of snippet, this is linear.
-
- while 1:
- if verbose_p:
- sys.stderr.write ('.')
- first = None
- endex = 1 << 30
- for type in types:
- if not found[type] or found[type][0] < index:
- found[type] = None
- m = res[type].search (s[index:endex])
- if not m:
- continue
-
- cl = Snippet
- if snippet_type_to_class.has_key (type):
- cl = snippet_type_to_class[type]
- snip = cl (type, m, format)
- start = index + m.start ('match')
- found[type] = (start, snip)
-
- if found[type] \
- and (not first \
- or found[type][0] < found[first][0]):
- first = type
-
- # FIXME.
-
- # Limiting the search space is a cute
- # idea, but this *requires* to search
- # for possible containing blocks
- # first, at least as long as we do not
- # search for the start of blocks, but
- # always/directly for the entire
- # @block ... @end block.
-
- endex = found[first][0]
-
- if not first:
- snippets.append (Substring (s, index, len (s)))
- break
-
- (start, snip) = found[first]
- snippets.append (Substring (s, index, start))
- snippets.append (snip)
- found[first] = None
- index = start + len (snip.match.group ('match'))
-
- return snippets
+ if verbose_p:
+ sys.stderr.write ('Dissecting')
+
+ res = {}
+ for i in types:
+ res[i] = re.compile (snippet_res[format][i])
+
+ snippets = []
+ index = 0
+ ## found = dict (map (lambda x: (x, None),
+ ## types))
+ ## urg python2.1
+ found = {}
+ map (lambda x, f = found: f.setdefault (x, None),
+ types)
+
+ # We want to search for multiple regexes, without searching
+ # the string multiple times for one regex.
+ # Hence, we use earlier results to limit the string portion
+ # where we search.
+ # Since every part of the string is traversed at most once for
+ # every type of snippet, this is linear.
+
+ while 1:
+ if verbose_p:
+ sys.stderr.write ('.')
+ first = None
+ endex = 1 << 30
+ for type in types:
+ if not found[type] or found[type][0] < index:
+ found[type] = None
+ m = res[type].search (s[index:endex])
+ if not m:
+ continue
+
+ cl = Snippet
+ if snippet_type_to_class.has_key (type):
+ cl = snippet_type_to_class[type]
+ snip = cl (type, m, format)
+ start = index + m.start ('match')
+ found[type] = (start, snip)
+
+ if found[type] \
+ and (not first \
+ or found[type][0] < found[first][0]):
+ first = type
+
+ # FIXME.
+
+ # Limiting the search space is a cute
+ # idea, but this *requires* to search
+ # for possible containing blocks
+ # first, at least as long as we do not
+ # search for the start of blocks, but
+ # always/directly for the entire
+ # @block ... @end block.
+
+ endex = found[first][0]
+
+ if not first:
+ snippets.append (Substring (s, index, len (s)))
+ break
+
+ (start, snip) = found[first]
+ snippets.append (Substring (s, index, start))
+ snippets.append (snip)
+ found[first] = None
+ index = start + len (snip.match.group ('match'))
+
+ return snippets
def nitpick_file (outdir, file):
- s = open (file).read ()
-
- for i in rules[GLOBAL_CXX]:
- s = re.sub (i[0], i[1], s)
-
- # FIXME: Containing blocks must be first, see
- # find_toplevel_snippets.
- # We leave simple strings be part of the code
- snippet_types = (
- 'multiline_comment',
- 'singleline_comment',
- 'string',
-# 'char',
- )
-
- chunks = find_toplevel_snippets (s, snippet_types)
- #code = filter (lambda x: is_derived_class (x.__class__, Substring),
- # chunks)
-
- t = string.join (map (lambda x: x.filter_text (), chunks), '')
- fixt = file
- if s != t:
- if not outdir:
- os.system ('mv %s %s~' % (file, file))
- else:
- fixt = os.path.join (outdir,
- os.path.basename (file))
- h = open (fixt, "w")
- h.write (t)
- h.close ()
- if s != t or indent_p:
- indent_file (fixt)
+ s = open (file).read ()
+
+ for i in rules[GLOBAL_CXX]:
+ s = re.sub (i[0], i[1], s)
+
+ # FIXME: Containing blocks must be first, see
+ # find_toplevel_snippets.
+ # We leave simple strings be part of the code
+ snippet_types = (
+ 'multiline_comment',
+ 'singleline_comment',
+ 'string',
+# 'char',
+ )
+
+ chunks = find_toplevel_snippets (s, snippet_types)
+ #code = filter (lambda x: is_derived_class (x.__class__, Substring),
+ # chunks)
+
+ t = string.join (map (lambda x: x.filter_text (), chunks), '')
+ fixt = file
+ if s != t:
+ if not outdir:
+ os.system ('mv %s %s~' % (file, file))
+ else:
+ fixt = os.path.join (outdir,
+ os.path.basename (file))
+ h = open (fixt, "w")
+ h.write (t)
+ h.close ()
+ if s != t or indent_p:
+ indent_file (fixt)
def indent_file (file):
- emacs = '''emacs\
- --no-window-system\
- --batch\
- --no-site-file\
- --no-init-file\
- %(file)s\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "cc-mode")
- (c++-mode)
- (indent-region (point-min) (point-max))
- (if (buffer-modified-p (current-buffer))
- (save-buffer)))' ''' % vars ()
- emacsclient = '''emacsclient\
- --socket-name=%(socketdir)s/%(socketname)s\
- --no-wait\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "cc-mode")
- (find-file "%(file)s")
- (c++-mode)
- (indent-region (point-min) (point-max))
- (if (buffer-modified-p (current-buffer))
- (save-buffer)))' ''' \
- % { 'file': file,
- 'socketdir' : socketdir,
- 'socketname' : socketname, }
- if verbose_p:
- sys.stderr.write (emacs)
- sys.stderr.write ('\n')
- os.system (emacs)
+ emacs = '''emacs\
+ --no-window-system\
+ --batch\
+ --no-site-file\
+ --no-init-file\
+ %(file)s\
+ --eval '(let ((error nil)
+ (version-control nil))
+ (load-library "cc-mode")
+ (c++-mode)
+ (indent-region (point-min) (point-max))
+ (if (buffer-modified-p (current-buffer))
+ (save-buffer)))' ''' % vars ()
+ emacsclient = '''emacsclient\
+ --socket-name=%(socketdir)s/%(socketname)s\
+ --no-wait\
+ --eval '(let ((error nil)
+ (version-control nil))
+ (load-library "cc-mode")
+ (find-file "%(file)s")
+ (c++-mode)
+ (indent-region (point-min) (point-max))
+ (if (buffer-modified-p (current-buffer))
+ (save-buffer)))' ''' \
+ % { 'file': file,
+ 'socketdir' : socketdir,
+ 'socketname' : socketname, }
+ if verbose_p:
+ sys.stderr.write (emacs)
+ sys.stderr.write ('\n')
+ os.system (emacs)
def usage ():
- sys.stdout.write (r'''
+ sys.stdout.write (r'''
Usage:
fixcc [OPTION]... FILE...
Options:
- --help
- --indent reindent, even if no changes
- --verbose
- --test
+ --help
+ --indent reindent, even if no changes
+ --verbose
+ --test
Typical use with LilyPond:
- fixcc $(find flower kpath-guile lily -name '*cc' -o -name '*hh' | grep -v /out)
+ fixcc $(find flower kpath-guile lily -name '*cc' -o -name '*hh' | grep -v /out)
This script is licensed under the GNU GPL
''')
def do_options ():
- global indent_p, outdir, verbose_p
- (options, files) = getopt.getopt (sys.argv[1:], '',
- ['help', 'indent', 'outdir=',
- 'test', 'verbose'])
- for (o, a) in options:
- if o == '--help':
- usage ()
- sys.exit (0)
- elif o == '--indent':
- indent_p = 1
- elif o == '--outdir':
- outdir = a
- elif o == '--verbose':
- verbose_p = 1
- elif o == '--test':
- test ()
- sys.exit (0)
- else:
- assert unimplemented
- if not files:
- usage ()
- sys.exit (2)
- return files
+ global indent_p, outdir, verbose_p
+ (options, files) = getopt.getopt (sys.argv[1:], '',
+ ['help', 'indent', 'outdir=',
+ 'test', 'verbose'])
+ for (o, a) in options:
+ if o == '--help':
+ usage ()
+ sys.exit (0)
+ elif o == '--indent':
+ indent_p = 1
+ elif o == '--outdir':
+ outdir = a
+ elif o == '--verbose':
+ verbose_p = 1
+ elif o == '--test':
+ test ()
+ sys.exit (0)
+ else:
+ assert unimplemented
+ if not files:
+ usage ()
+ sys.exit (2)
+ return files
outdir = 0
socketname = 'fixcc%d' % os.getpid ()
def setup_client ():
- #--no-window-system\
- #--batch\
- os.unlink (os.path.join (socketdir, socketname))
- os.mkdir (socketdir, 0700)
- emacs='''emacs\
- --no-site-file\
- --no-init-file\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "server")
- (setq server-socket-dir "%(socketdir)s")
- (setq server-name "%(socketname)s")
- (server-start)
- (while t) (sleep 1000))' ''' \
- % { 'socketdir' : socketdir,
- 'socketname' : socketname, }
-
- if not os.fork ():
- os.system (emacs)
- sys.exit (0)
- while not os.path.exists (os.path.join (socketdir, socketname)):
- time.sleep (1)
+ #--no-window-system\
+ #--batch\
+ os.unlink (os.path.join (socketdir, socketname))
+ os.mkdir (socketdir, 0700)
+ emacs='''emacs\
+ --no-site-file\
+ --no-init-file\
+ --eval '(let ((error nil)
+ (version-control nil))
+ (load-library "server")
+ (setq server-socket-dir "%(socketdir)s")
+ (setq server-name "%(socketname)s")
+ (server-start)
+ (while t) (sleep 1000))' ''' \
+ % { 'socketdir' : socketdir,
+ 'socketname' : socketname, }
+
+ if not os.fork ():
+ os.system (emacs)
+ sys.exit (0)
+ while not os.path.exists (os.path.join (socketdir, socketname)):
+ time.sleep (1)
def main ():
- #emacsclient should be faster, but this does not work yet
- #setup_client ()
- files = do_options ()
- if outdir and not os.path.isdir (outdir):
- os.makedirs (outdir)
- for i in files:
- sys.stderr.write ('%s...\n' % i)
- nitpick_file (outdir, i)
+ #emacsclient should be faster, but this does not work yet
+ #setup_client ()
+ files = do_options ()
+ if outdir and not os.path.isdir (outdir):
+ os.makedirs (outdir)
+ for i in files:
+ sys.stderr.write ('%s...\n' % i)
+ nitpick_file (outdir, i)
## TODO: make this compilable and check with g++
};
typedef struct
{
- Foo **bar;
+ Foo **bar;
} String;
ostream &
typedef struct _t_ligature
{
- char *succ, *lig;
- struct _t_ligature * next;
+ char *succ, *lig;
+ struct _t_ligature * next;
} AFM_Ligature;
-
+
typedef std::map < AFM_Ligature const *, int > Bar;
- /**
- (c) 1997--2006 Han-Wen Nienhuys <hanwen@cs.uu.nl>
- */
-
-/* ||
- * vv
- * !OK OK
+ /**
+ (c) 1997--2006 Han-Wen Nienhuys <hanwen@cs.uu.nl>
*/
+
+/* ||
+* vv
+* !OK OK
+*/
/* ||
- vv
- !OK OK
- */
+ vv
+ !OK OK
+*/
char *
Foo:: foo ()
{
int
i
;
- char* a= &++ i ;
- a [*++ a] = (char*) foe (*i, &bar) *
- 2;
- int operator double ();
- std::map<char*,int> y =*bar(-*a ,*b);
- Interval_t<T> & operator*= (T r);
- Foo<T>*c;
- int compare (Pqueue_ent < K, T > const& e1, Pqueue_ent < K,T> *e2);
- delete *p;
- if (abs (f)*2 > abs (d) *FUDGE)
- ;
- while (0);
- for (; i<x foo(); foo>bar);
- for (; *p && > y;
- foo > bar)
+ char* a= &++ i ;
+ a [*++ a] = (char*) foe (*i, &bar) *
+ 2;
+ int operator double ();
+ std::map<char*,int> y =*bar(-*a ,*b);
+ Interval_t<T> & operator*= (T r);
+ Foo<T>*c;
+ int compare (Pqueue_ent < K, T > const& e1, Pqueue_ent < K,T> *e2);
+ delete *p;
+ if (abs (f)*2 > abs (d) *FUDGE)
+ ;
+ while (0);
+ for (; i<x foo(); foo>bar);
+ for (; *p && > y;
+ foo > bar)
;
- do {
- ;;;
- }
- while (foe);
-
- squiggle. extent;
- 1 && * unsmob_moment (lf);
- line_spanner_ = make_spanner ("DynamicLineSpanner", rq ? rq->*self_scm
+ do {
+ ;;;
+ }
+ while (foe);
+
+ squiggle. extent;
+ 1 && * unsmob_moment (lf);
+ line_spanner_ = make_spanner ("DynamicLineSpanner", rq ? rq->*self_scm
(): SCM_EOL);
- case foo: k;
+ case foo: k;
- if (0) {a=b;} else {
- c=d;
- }
+ if (0) {a=b;} else {
+ c=d;
+ }
- cookie_io_functions_t Memory_out_stream::functions_ = {
- Memory_out_stream::reader,
- ...
- };
+ cookie_io_functions_t Memory_out_stream::functions_ = {
+ Memory_out_stream::reader,
+ ...
+ };
- int compare (Array < Pitch> *, Array < Pitch> *);
- original_ = (Grob *) & s;
- Drul_array< Link_array<Grob> > o;
+ int compare (Array < Pitch> *, Array < Pitch> *);
+ original_ = (Grob *) & s;
+ Drul_array< Link_array<Grob> > o;
}
- header_.char_info_pos = (6 + header_length) * 4;
- return ly_bool2scm (*ma < * mb);
+ header_.char_info_pos = (6 + header_length) * 4;
+ return ly_bool2scm (*ma < * mb);
- 1 *::sign(2);
+ 1 *::sign(2);
- (shift) *-d;
+ (shift) *-d;
- a = 0 ? *x : *y;
+ a = 0 ? *x : *y;
a = "foo() 2,2,4";
{
- if (!span_)
- {
- span_ = make_spanner ("StaffSymbol", SCM_EOL);
- }
+ if (!span_)
+ {
+ span_ = make_spanner ("StaffSymbol", SCM_EOL);
+ }
}
{
- if (!span_)
- {
- span_ = make_spanner (StaffSymbol, SCM_EOL);
- }
+ if (!span_)
+ {
+ span_ = make_spanner (StaffSymbol, SCM_EOL);
+ }
}
'''
def test ():
- test_file = 'fixcc.cc'
- open (test_file, 'w').write (TEST)
- nitpick_file (outdir, test_file)
- sys.stdout.write (open (test_file).read ())
+ test_file = 'fixcc.cc'
+ open (test_file, 'w').write (TEST)
+ nitpick_file (outdir, test_file)
+ sys.stdout.write (open (test_file).read ())
if __name__ == '__main__':
- main ()
+ main ()
import string
(options, files) = \
- getopt.getopt (sys.argv[1:],
- '',
- ['dir='])
+ getopt.getopt (sys.argv[1:],
+ '',
+ ['dir='])
outdir = ''
for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--dir':
- outdir = a
- else:
- print o
- raise getopt.error
+ o = opt[0]
+ a = opt[1]
+ if o == '--dir':
+ outdir = a
+ else:
+ print o
+ raise getopt.error
# Ugh
for design_size in [11,13,14,16,18,20,23,26]:
- name = 'Emmentaler'
- filename = name.lower ()
- script = '''#!@FONTFORGE@
+ name = 'Emmentaler'
+ filename = name.lower ()
+ script = '''#!@FONTFORGE@
New();
# load nummer/din after setting PUA.
i = 0;
while (i < CharCnt())
- Select(i);
+ Select(i);
# crashes fontforge, use PUA for now -- jcn
# SetUnicodeValue(i + 0xF0000, 0);
/*
PRIVATE AREA
- In the BMP, the range 0xe000 to 0xf8ff will never be assigned to any
- characters by the standard and is reserved for private usage. For the
- Linux community, this private area has been subdivided further into the
- range 0xe000 to 0xefff which can be used individually by any end-user
- and the Linux zone in the range 0xf000 to 0xf8ff where extensions are
- coordinated among all Linux users. The registry of the characters
- assigned to the Linux zone is currently maintained by H. Peter Anvin
- <Peter.Anvin@linux.org>.
+ In the BMP, the range 0xe000 to 0xf8ff will never be assigned to any
+ characters by the standard and is reserved for private usage. For the
+ Linux community, this private area has been subdivided further into the
+ range 0xe000 to 0xefff which can be used individually by any end-user
+ and the Linux zone in the range 0xf000 to 0xf8ff where extensions are
+ coordinated among all Linux users. The registry of the characters
+ assigned to the Linux zone is currently maintained by H. Peter Anvin
+ <Peter.Anvin@linux.org>.
*/
- SetUnicodeValue(i + 0xE000, 0);
- ++i;
+ SetUnicodeValue(i + 0xE000, 0);
+ ++i;
endloop
Generate("%(filename)s-%(design_size)d.svg");
''' % vars()
- basename = '%s-%d' % (filename, design_size)
- path = os.path.join (outdir, basename + '.pe')
- open (path, 'w').write (script)
+ basename = '%s-%d' % (filename, design_size)
+ path = os.path.join (outdir, basename + '.pe')
+ open (path, 'w').write (script)
- subfonts = ['feta%(design_size)d',
- 'parmesan%(design_size)d',
- 'feta-alphabet%(design_size)d']
+ subfonts = ['feta%(design_size)d',
+ 'parmesan%(design_size)d',
+ 'feta-alphabet%(design_size)d']
- ns = []
- for s in subfonts:
- ns.append ('%s' % (s % vars()))
-
- subfonts_str = string.join (ns)
-
- open (os.path.join (outdir, '%(filename)s-%(design_size)d.subfonts' % vars()), 'w').write (subfonts_str)
+ ns = []
+ for s in subfonts:
+ ns.append ('%s' % (s % vars()))
+
+ subfonts_str = string.join (ns)
+
+ open (os.path.join (outdir, '%(filename)s-%(design_size)d.subfonts' % vars()), 'w').write (subfonts_str)
- path = os.path.join (outdir, '%s-%d.dep' % (filename, design_size))
+ path = os.path.join (outdir, '%s-%d.dep' % (filename, design_size))
- deps = r'''%(filename)s-%(design_size)d.otf: $(outdir)/feta%(design_size)d.pfa \
- $(outdir)/parmesan%(design_size)d.pfa \
- $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-table \
- $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-gtable
+ deps = r'''%(filename)s-%(design_size)d.otf: $(outdir)/feta%(design_size)d.pfa \
+ $(outdir)/parmesan%(design_size)d.pfa \
+ $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-table \
+ $(outdir)/feta-alphabet%(design_size)d.pfa feta%(design_size)d.otf-gtable
''' % vars()
- open (path, 'w').write (deps)
+ open (path, 'w').write (deps)
- open (os.path.join (outdir, basename + '.fontname'), 'w').write ("%s-%d" % (name, design_size))
+ open (os.path.join (outdir, basename + '.fontname'), 'w').write ("%s-%d" % (name, design_size))
os.chdir(dir)
def system (c):
- print c
- if os.system (c):
- raise 'barf'
+ print c
+ if os.system (c):
+ raise 'barf'
outputs = []
for sz in [48,32,16] :
-
- for depth in [24,8]:
- out = '%(base)s-%(sz)d-%(depth)d.png' % locals()
- system ('convert -depth %(depth)d -sample %(sz)d %(input)s %(out)s' %
- locals ())
- outputs.append (out)
-
+
+ for depth in [24,8]:
+ out = '%(base)s-%(sz)d-%(depth)d.png' % locals()
+ system ('convert -depth %(depth)d -sample %(sz)d %(input)s %(out)s' %
+ locals ())
+ outputs.append (out)
+
system('icotool --output %s --create %s' % (output, ' '.join (outputs)))
-system('rm -rf %(dir)s' % locals())
+system('rm -rf %(dir)s' % locals())
# keywords not otherwise found
for line in ['include','maininput','version']:
- kw = kw + [line]
+ kw = kw + [line]
# the main keywords
F = open('lily/lily-lexer.cc', 'r')
for line in F.readlines():
- m = re.search(r"(\s*{\")(.*)(\",\s*.*},\s*\n)",line)
- if m:
- kw = kw + [m.group(2)]
+ m = re.search(r"(\s*{\")(.*)(\",\s*.*},\s*\n)",line)
+ if m:
+ kw = kw + [m.group(2)]
F.close()
# keywords in markup
F = open('scm/markup.scm', 'r')
for line in F.readlines():
- m = re.search(r"^(\s*\(cons\s*)([a-z-]*)(-markup)",line)
- if m:
- kw = kw + [m.group(2)]
+ m = re.search(r"^(\s*\(cons\s*)([a-z-]*)(-markup)",line)
+ if m:
+ kw = kw + [m.group(2)]
F.close()
# identifiers and keywords
'ly/script-init.ly',
'ly/spanners-init.ly',
]:
- F = open(name, 'r')
- for line in F.readlines():
- m = re.search(r"^([a-zA-Z]+)(\s*=)",line)
- if m:
- kw = kw + [m.group(1)]
- F.close()
+ F = open(name, 'r')
+ for line in F.readlines():
+ m = re.search(r"^([a-zA-Z]+)(\s*=)",line)
+ if m:
+ kw = kw + [m.group(1)]
+ F.close()
# more identifiers
for name in [
'ly/declarations-init.ly',
'ly/params-init.ly',
]:
- F = open(name, 'r')
- for line in F.readlines():
- m = re.search(r"^(\s*)([a-zA-Z]+)(\s*=)",line)
- if m:
- kw = kw + [m.group(2)]
- F.close()
+ F = open(name, 'r')
+ for line in F.readlines():
+ m = re.search(r"^(\s*)([a-zA-Z]+)(\s*=)",line)
+ if m:
+ kw = kw + [m.group(2)]
+ F.close()
# note names
for name in [
'ly/svenska.ly',
'ly/vlaams.ly',
]:
- F = open(name, 'r')
- for line in F.readlines():
- m = re.search(r"^(\s*\()([a-z]+)([^l]+ly:make-pitch)",line)
- if m:
- notes = notes + ['' + m.group(2)]
- F.close()
+ F = open(name, 'r')
+ for line in F.readlines():
+ m = re.search(r"^(\s*\()([a-z]+)([^l]+ly:make-pitch)",line)
+ if m:
+ notes = notes + ['' + m.group(2)]
+ F.close()
-
+
# reserved words
for name in [
'ly/engraver-init.ly',
'ly/performer-init.ly',
]:
- F = open(name, 'r')
- for line in F.readlines():
- for pattern in [
- r"^(\s*.consists\s+\")([a-zA-Z_]+)(\")",
- r"([\\]name\s+[\"]?)([a-zA-Z_]+)([\"]?)",
- r"(\s+)([a-zA-Z_]+)(\s*[\\]((set)|(override)))",
- ]:
- m = re.search(pattern,line)
- if m:
- rw = rw + ['' + m.group(2)]
- F.close()
+ F = open(name, 'r')
+ for line in F.readlines():
+ for pattern in [
+ r"^(\s*.consists\s+\")([a-zA-Z_]+)(\")",
+ r"([\\]name\s+[\"]?)([a-zA-Z_]+)([\"]?)",
+ r"(\s+)([a-zA-Z_]+)(\s*[\\]((set)|(override)))",
+ ]:
+ m = re.search(pattern,line)
+ if m:
+ rw = rw + ['' + m.group(2)]
+ F.close()
# the output file
outdir = '.';
suffix = ['skip','skip','skip'];
outs = ['','',''];
for s in sys.argv[1:]:
- if s == '--words':
- suffix[0] = '';
- if s == '--el':
- suffix[1] = '.el';
- if s == '--vim':
- suffix[2] = '.vim';
- m = re.search(r"(--dir=)(\S*)",s)
- if m:
- outdir = m.group(2)
+ if s == '--words':
+ suffix[0] = '';
+ if s == '--el':
+ suffix[1] = '.el';
+ if s == '--vim':
+ suffix[2] = '.vim';
+ m = re.search(r"(--dir=)(\S*)",s)
+ if m:
+ outdir = m.group(2)
if '' in suffix:
- outs[0] = open(outdir+'/lilypond-words'+suffix[0], 'w')
+ outs[0] = open(outdir+'/lilypond-words'+suffix[0], 'w')
if '.el' in suffix:
- outs[1] = open(outdir+'/lilypond-words'+suffix[1], 'w')
+ outs[1] = open(outdir+'/lilypond-words'+suffix[1], 'w')
if '.vim' in suffix:
- outs[2] = open(outdir+'/lilypond-words'+suffix[2], 'w')
+ outs[2] = open(outdir+'/lilypond-words'+suffix[2], 'w')
# alphabetically ordered words
kw.sort()
kw.reverse()
prevline = ''
if '.vim' in suffix:
- outs[2].write('syn match lilyKeyword \"[-_^]\\?\\\\\\(');
+ outs[2].write('syn match lilyKeyword \"[-_^]\\?\\\\\\(');
for line in kw:
- if line != prevline:
- if '' in suffix:
- outs[0].write('\\\\' + line + '\n')
- if '.el' in suffix:
- outs[1].write('\\\\' + line + '\n')
- if '.vim' in suffix:
- outs[2].write(line + '\\|')
- prevline = line
+ if line != prevline:
+ if '' in suffix:
+ outs[0].write('\\\\' + line + '\n')
+ if '.el' in suffix:
+ outs[1].write('\\\\' + line + '\n')
+ if '.vim' in suffix:
+ outs[2].write(line + '\\|')
+ prevline = line
if '.vim' in suffix:
- outs[2].write('n\\)\\(\\A\\|\\n\\)\"me=e-1\n')
+ outs[2].write('n\\)\\(\\A\\|\\n\\)\"me=e-1\n')
rw.sort()
rw.reverse()
prevline = ''
if '.vim' in suffix:
- outs[2].write('syn match lilyReservedWord \"\\(\\A\\|\\n\\)\\(')
+ outs[2].write('syn match lilyReservedWord \"\\(\\A\\|\\n\\)\\(')
for line in rw:
- if line != prevline:
- if '' in suffix:
- outs[0].write(line + '\n')
- if '.el' in suffix:
- outs[1].write(line + '\n')
- if '.vim' in suffix:
- outs[2].write(line + '\\|')
- prevline = line
+ if line != prevline:
+ if '' in suffix:
+ outs[0].write(line + '\n')
+ if '.el' in suffix:
+ outs[1].write(line + '\n')
+ if '.vim' in suffix:
+ outs[2].write(line + '\\|')
+ prevline = line
if '.vim' in suffix:
- outs[2].write('Score\\)\\(\\A\\|\\n\\)\"ms=s+1,me=e-1\n')
+ outs[2].write('Score\\)\\(\\A\\|\\n\\)\"ms=s+1,me=e-1\n')
notes.sort()
notes.reverse()
prevline = ''
if '.vim' in suffix:
- outs[2].write('syn match lilyNote \"\\<\\(\\(\\(');
+ outs[2].write('syn match lilyNote \"\\<\\(\\(\\(');
for line in notes:
- if line != prevline:
- if '' in suffix:
- outs[0].write(line + '\n')
- if '.el' in suffix:
- outs[1].write(line + '\n')
- if '.vim' in suffix:
- outs[2].write(line + '\\|')
- prevline = line
+ if line != prevline:
+ if '' in suffix:
+ outs[0].write(line + '\n')
+ if '.el' in suffix:
+ outs[1].write(line + '\n')
+ if '.vim' in suffix:
+ outs[2].write(line + '\\|')
+ prevline = line
if '.vim' in suffix:
- outs[2].write('a\\)\\([,\']\\)\\{,4}\\([?!]\\)\\?\\)\\|s\\|r\\)\\(\\(128\\|64\\|32\\|16\\|8\\|4\\|2\\|1\\|\\\\breve\\|\\\\longa\\|\\\\maxima\\)[.]\\{,8}\\)\\?\\(\\A\\|\\n\\)\"me=e-1\n')
+ outs[2].write('a\\)\\([,\']\\)\\{,4}\\([?!]\\)\\?\\)\\|s\\|r\\)\\(\\(128\\|64\\|32\\|16\\|8\\|4\\|2\\|1\\|\\\\breve\\|\\\\longa\\|\\\\maxima\\)[.]\\{,8}\\)\\?\\(\\A\\|\\n\\)\"me=e-1\n')
# the menu in lilypond-mode.el
for line in [
'//times - % { _ } -',
'//transpose - % { _ } -',
]:
- # urg. escape char '/' is replaced with '\\' which python writes as a '\'.
- if '.el' in suffix:
- outs[1].write(string.join(string.split(line,'/'),'\\') + '\n')
-
+ # urg. escape char '/' is replaced with '\\' which python writes as a '\'.
+ if '.el' in suffix:
+ outs[1].write(string.join(string.split(line,'/'),'\\') + '\n')
+
if '' in suffix:
- outs[0].close()
+ outs[0].close()
if '.el' in suffix:
- outs[1].close()
+ outs[1].close()
if '.vim' in suffix:
- outs[2].close()
+ outs[2].close()
'''
TODO:
- * Add @nodes, plit at sections?
- * Less kludged first introduction file
- * include *.texi files for text at start of section?
+ * Add @nodes, plit at sections?
+ * Less kludged first introduction file
+ * include *.texi files for text at start of section?
'''
program_name = 'lys-to-tely'
def help ():
- sys.stdout.write (r"""Usage: lys-to-tely [OPTIONS]... LY-FILE...
+ sys.stdout.write (r"""Usage: lys-to-tely [OPTIONS]... LY-FILE...
Construct tely doc from LY-FILEs.
Options:
- -h, --help print this help
- -o, --output=NAME write tely doc to NAME
- -t, --title=TITLE set tely doc title TITLE
+ -h, --help print this help
+ -o, --output=NAME write tely doc to NAME
+ -t, --title=TITLE set tely doc title TITLE
""")
- sys.exit (0)
+ sys.exit (0)
(options, files) = getopt.getopt(sys.argv[1:], 'hn:t:', [
- 'help', 'name=', 'title='])
+ 'help', 'name=', 'title='])
name="ly-doc"
title="Ly Doc"
for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '-h' or o == '--help':
- help ()
- elif o == '-n' or o == '--name':
- name = a
- elif o == '-t' or o == '--title':
- title = a
- else:
- raise 'unknown opt ', o
+ o = opt[0]
+ a = opt[1]
+ if o == '-h' or o == '--help':
+ help ()
+ elif o == '-n' or o == '--name':
+ name = a
+ elif o == '-t' or o == '--title':
+ title = a
+ else:
+ raise 'unknown opt ', o
def strip_extension (f, ext):
- (p, e) = os.path.splitext (f)
- if e == ext:
- e = ''
- return p + e
+ (p, e) = os.path.splitext (f)
+ if e == ext:
+ e = ''
+ return p + e
if files:
- dir = os.path.dirname (name)
- if not dir:
- dir = "."
- name = strip_extension (os.path.basename (name), ".tely")
+ dir = os.path.dirname (name)
+ if not dir:
+ dir = "."
+ name = strip_extension (os.path.basename (name), ".tely")
- s = '''\input texinfo
+ s = '''\input texinfo
@setfilename %s.info
@settitle %s
@end iftex
@finalout @c we do not want black boxes.
-
+
@c fool ls-latex
@ignore
@author Han-Wen Nienhuys and Jan Nieuwenhuizen
@node Top, , , (dir)
''' % (name, title, title)
- def name2line (n):
- # UGR
- s = r"""
+ def name2line (n):
+ # UGR
+ s = r"""
@ifhtml
@html
<A NAME="%s"></A>
@end html
@end ifhtml
""" % n
-
- s += "\n\n@lilypondfile[printfilename,texidoc]{%s}" % n
- return s
- files.sort ()
- s = s + string.join (map (lambda x: name2line (x), files), "\n")
- s = s + '\n@bye\n'
- f = "%s/%s.tely" % (dir, name)
- sys.stderr.write ("%s: writing %s..." % (program_name, f))
- h = open (f, "w")
- h.write (s)
- h.close ()
- sys.stderr.write ('\n')
+
+ s += "\n\n@lilypondfile[printfilename,texidoc]{%s}" % n
+ return s
+ files.sort ()
+ s = s + string.join (map (lambda x: name2line (x), files), "\n")
+ s = s + '\n@bye\n'
+ f = "%s/%s.tely" % (dir, name)
+ sys.stderr.write ("%s: writing %s..." % (program_name, f))
+ h = open (f, "w")
+ h.write (s)
+ h.close ()
+ sys.stderr.write ('\n')
else:
- # not Unix philosophy, but hey, at least we notice when
- # we don't distribute any .ly files.
- sys.stderr.write ("No files specified. Doing nothing")
+ # not Unix philosophy, but hey, at least we notice when
+ # we don't distribute any .ly files.
+ sys.stderr.write ("No files specified. Doing nothing")
# Read some global vars
class Afm_reader:
- def __init__ (self, filename):
- self.filename = filename
- self.lines = open (self.filename).readlines ()
-
- def get_afm (self):
- afm = Afm_font_metric (self.filename)
- for i in self.lines[:20]:
- m = re.match ('([^ \t\n]*)[ \t]*(.*[^ \t\n])', i)
- if m and m.group (1):
- key = m.group (1)
- value = m.group (2)
- if key != 'Comment':
- afm.__dict__[key] = value
- return afm
+ def __init__ (self, filename):
+ self.filename = filename
+ self.lines = open (self.filename).readlines ()
+
+ def get_afm (self):
+ afm = Afm_font_metric (self.filename)
+ for i in self.lines[:20]:
+ m = re.match ('([^ \t\n]*)[ \t]*(.*[^ \t\n])', i)
+ if m and m.group (1):
+ key = m.group (1)
+ value = m.group (2)
+ if key != 'Comment':
+ afm.__dict__[key] = value
+ return afm
class Afm_font_metric:
- def __init__ (self, filename):
- m = re.match ('.*/(.+)', filename)
- self.filename = m.group (1)
- m = re.match ('([-_A-Za-z]*)([0-9]*)', self.filename)
- self.name = m.group (1) + m.group (2)
- self.basename = m.group (1)
- self.designsize = m.group (2)
-
+ def __init__ (self, filename):
+ m = re.match ('.*/(.+)', filename)
+ self.filename = m.group (1)
+ m = re.match ('([-_A-Za-z]*)([0-9]*)', self.filename)
+ self.name = m.group (1) + m.group (2)
+ self.basename = m.group (1)
+ self.designsize = m.group (2)
+
def read_afm_file (filename):
- reader = Afm_reader (filename)
- return reader.get_afm ()
+ reader = Afm_reader (filename)
+ return reader.get_afm ()
#if __name__ == '__main__':
-# i = read_afm_file (sys.argv[1])
-# print i, i.FullName, i.FontName
+# i = read_afm_file (sys.argv[1])
+# print i, i.FullName, i.FontName
### mftrace
class Font_info:
- cm = {
- 'bx': ('bold', 'roman'),
- 'bxti' : ('bold', 'italic'),
- 'csc' : ('smallcaps', 'roman'),
- 'r' : ('regular', 'roman'),
- 'ss' : ('regular', 'sansserif'),
- 'tt' : ('regular', 'typewriter'),
- 'ti' : ('regular', 'italic'),
- }
-
- def set_defaults (self, name):
- self.FontName = name
- self.FullName = name
- self.EncodingScheme = 'AdobeStandard'
-
- self.foundry = 'GNU'
- self.family = 'LilyPond'
- self.weight = 'Feta'
- self.slant = 'r'
- self.setwidth = 'normal'
- self.style = ''
- self.pixelsize = '0'
- self.pointsize = '0'
- self.xresolution = '0'
- self.yresolution = '0'
- self.spacing = 'p'
- self.averagewidth = '0'
- self.registry = 'GNU'
- self.encoding = 'FontSpecific'
-
- split = string.split (name, '-')
- if len (split) >= 4:
- # Assume
- # Adobe FontName = X11 foundry-family-weight-style
- if 1:
- self.foundry, self.family = split[:2]
- else: # testin'
- self.foundry = split[0]
- self.family = string.join (split[1:-2], ' ')
- self.weight = string.join (split[2:-1], ' ')
- self.style = split[-1:][0]
- self.FamilyName = '%s %s' % (self.family, self.weight)
- self.designsize = self.style
- elif name[:2] == 'cm':
- self.foundry = 'TeX' # Knuth?
- self.FamilyName = 'Computer Modern'
- self.family = self.FamilyName
- m = re.match ('^cm([a-z]*)([0-9]*)', name)
- self.weight = string.join (self.cm[m.group (1)], ' ')
- self.designsize = m.group (2)
- self.style = self.designsize
- else:
- self.FamilyName = name
-
- def __init__ (self, x):
- if type (x) == type ("hallo"):
- m = re.match ('([-_A-Za-z]*)([0-9]*)', x)
- self.name = x
- self.basename = m.group (1)
- self.designsize = m.group (2)
- self.set_defaults (x)
- elif type (x) == type ({}):
- self.set_defaults (x['FontName'])
- for k in x.keys ():
- self.__dict__[k] = x[k]
-
- def __getitem__ (self, key):
- return self.__dict__[key]
-
- def get_X11 (self):
- return (self.foundry, self.family, self.weight,
- self.slant, self.setwidth, self.style,
- self.pixelsize, self.pointsize,
- self.xresolution, self.yresolution,
- self.spacing, self.averagewidth,
- self.registry, self.encoding)
+ cm = {
+ 'bx': ('bold', 'roman'),
+ 'bxti' : ('bold', 'italic'),
+ 'csc' : ('smallcaps', 'roman'),
+ 'r' : ('regular', 'roman'),
+ 'ss' : ('regular', 'sansserif'),
+ 'tt' : ('regular', 'typewriter'),
+ 'ti' : ('regular', 'italic'),
+ }
+
+ def set_defaults (self, name):
+ self.FontName = name
+ self.FullName = name
+ self.EncodingScheme = 'AdobeStandard'
+
+ self.foundry = 'GNU'
+ self.family = 'LilyPond'
+ self.weight = 'Feta'
+ self.slant = 'r'
+ self.setwidth = 'normal'
+ self.style = ''
+ self.pixelsize = '0'
+ self.pointsize = '0'
+ self.xresolution = '0'
+ self.yresolution = '0'
+ self.spacing = 'p'
+ self.averagewidth = '0'
+ self.registry = 'GNU'
+ self.encoding = 'FontSpecific'
+
+ split = string.split (name, '-')
+ if len (split) >= 4:
+ # Assume
+ # Adobe FontName = X11 foundry-family-weight-style
+ if 1:
+ self.foundry, self.family = split[:2]
+ else: # testin'
+ self.foundry = split[0]
+ self.family = string.join (split[1:-2], ' ')
+ self.weight = string.join (split[2:-1], ' ')
+ self.style = split[-1:][0]
+ self.FamilyName = '%s %s' % (self.family, self.weight)
+ self.designsize = self.style
+ elif name[:2] == 'cm':
+ self.foundry = 'TeX' # Knuth?
+ self.FamilyName = 'Computer Modern'
+ self.family = self.FamilyName
+ m = re.match ('^cm([a-z]*)([0-9]*)', name)
+ self.weight = string.join (self.cm[m.group (1)], ' ')
+ self.designsize = m.group (2)
+ self.style = self.designsize
+ else:
+ self.FamilyName = name
+
+ def __init__ (self, x):
+ if type (x) == type ("hallo"):
+ m = re.match ('([-_A-Za-z]*)([0-9]*)', x)
+ self.name = x
+ self.basename = m.group (1)
+ self.designsize = m.group (2)
+ self.set_defaults (x)
+ elif type (x) == type ({}):
+ self.set_defaults (x['FontName'])
+ for k in x.keys ():
+ self.__dict__[k] = x[k]
+
+ def __getitem__ (self, key):
+ return self.__dict__[key]
+
+ def get_X11 (self):
+ return (self.foundry, self.family, self.weight,
+ self.slant, self.setwidth, self.style,
+ self.pixelsize, self.pointsize,
+ self.xresolution, self.yresolution,
+ self.spacing, self.averagewidth,
+ self.registry, self.encoding)
fontinfo = {}
sketch_p = 0
sodipodi_p = 0
if len (ls) and ls[0] == 'sodipodi':
- ls = ls[1:]
- sodipodi_p = 1
+ ls = ls[1:]
+ sodipodi_p = 1
elif len (ls) and ls[0] == 'sketch':
- ls = ls[1:]
- sketch_p = 1
+ ls = ls[1:]
+ sketch_p = 1
if not (sketch_p or sodipodi_p):
- print len(ls)
-
+ print len(ls)
+
for filename in ls:
- basename = re.sub ('\.pf[ab]', '',filename)
- fontname = re.sub ('-', ' ',basename)
-
- m = re.search ("([0-9]+)$", fontname)
- designsize = 'normal'
-
-
- if m:
- designsize = m.group (1)
- fontbase = re.sub ("([0-9]+)$", "", fontname)
-
-
- # FIXME: Font naming -- what a mess
- # Check sane naming with xfontsel and gtkfontsel
-
- # Adobe's font naming scheme and X11's seem to be conflicting.
- # Adobe's FontFamily seems to be X11's family + weight
- # Also, text selection applets like gtkfontsel, gfontview and
- # GNOME-applications specific ones, display X11's `family'
- # parameter as `Font', and X11's `Weight' parameter as `Style'.
-
- # Using X11 description/convention -- good for xfontsel:
- # 1 foundry: GNU
- # 2 family: LilyPond <basename>
- # 3 weight: <designsize>
- # 4 slant: r(oman) =upright
- # 5 setwidth: normal
- # 6 style:
- # 7 pixelsize: 0
- # 8 pointsize: 0 (20 crashes xfs, moved to style)
- # 9 xresolution: 0
- # 10 yresolution: 0
- # 11 spacing: p(roportional)
- # 12 averagewidth: 0
- # 13 registry: GNU
- # 14 encoding: fonstpecific
-
- # gives:
- # feta20.pfa -GNU-LilyPond feta-20-r-normal--0-0-0-0-p-0-gnu-fontspecific
-
- # However, GNOME (gtkfontsel, gnome apps) seems to want:
-
- # 1 foundry: GNU
- # 2 family: LilyPond
- # 3 weight: <basename>
- # 4 slant: r(oman) =upright
- # 5 setwidth: normal
- # 6 style: <designsize>
- # 7 pixelsize: 0
- # 8 pointsize: 0 (20 crashes xfs, moved to style)
- # 9 xresolution: 0
- # 10 yresolution: 0
- # 11 spacing: p(roportional)
- # 12 averagewidth: 0
- # 13 registry: GNU
- # 14 encoding: fonstpecific
-
- # which gives:
- # feta20.pfa -GNU-LilyPond-feta-r-normal--20-0-0-0-p-0-gnu-fontspecific
- # foundry: GNU
-
- ## ouch, pointsize 20 crashes xfs
- ## XXXfeta20.pfa -GNU-LilyPond Feta-regular-r-normal--0-20-0-0-p-0-gnu-fontspecific
-
- ## feta20.pfa -GNU-LilyPond feta-regular-r-normal-20-0-0-0-0-p-0-gnu-fontspecific
-
- afmfile = ''
- if not afmfile:
- #afmfile = find_file (basename + '.afm')
- afmfile = basename + '.afm'
-
- if afmfile:
- afmfile = os.path.abspath (afmfile)
- if os.path.exists (afmfile):
- afm = read_afm_file (afmfile)
- fontinfo = Font_info (afm.__dict__)
- else:
- fontinfo = Font_info (basename)
-
- family_name = string.join (string.split (fontinfo['FamilyName'],
- '-'), ' ')
-
- if sodipodi_p:
- print string.join ((os.path.abspath (filename),
- fontinfo.FamilyName,
- fontinfo.FamilyName,''
- ),
-
- ',')
-
- elif sketch_p:
- # Sketch's lilypond.sfd map:
- s = string.join ([fontinfo.FontName,
- fontinfo.family,
- '%s %s' % (fontinfo.weight, fontinfo.style),
- string.join (fontinfo.get_X11 ()[:4], '-'),
- string.join (fontinfo.get_X11 ()[-2:], '-'),
- fontinfo.name],
- ',')
- print s
-
- s = string.join ([fontinfo.FamilyName + fontinfo.designsize,
- fontinfo.family,
- '%s %s' % (fontinfo.weight, fontinfo.style),
- string.join (fontinfo.get_X11 ()[:4], '-'),
- string.join (fontinfo.get_X11 ()[-2:], '-'),
- fontinfo.name],
- ',')
- print s
- else:
- print filename + ' -' + string.join (fontinfo.get_X11 (), '-')
+ basename = re.sub ('\.pf[ab]', '',filename)
+ fontname = re.sub ('-', ' ',basename)
+
+ m = re.search ("([0-9]+)$", fontname)
+ designsize = 'normal'
+
+
+ if m:
+ designsize = m.group (1)
+ fontbase = re.sub ("([0-9]+)$", "", fontname)
+
+
+ # FIXME: Font naming -- what a mess
+ # Check sane naming with xfontsel and gtkfontsel
+
+ # Adobe's font naming scheme and X11's seem to be conflicting.
+ # Adobe's FontFamily seems to be X11's family + weight
+ # Also, text selection applets like gtkfontsel, gfontview and
+ # GNOME-applications specific ones, display X11's `family'
+ # parameter as `Font', and X11's `Weight' parameter as `Style'.
+
+ # Using X11 description/convention -- good for xfontsel:
+ # 1 foundry: GNU
+ # 2 family: LilyPond <basename>
+ # 3 weight: <designsize>
+ # 4 slant: r(oman) =upright
+ # 5 setwidth: normal
+ # 6 style:
+ # 7 pixelsize: 0
+ # 8 pointsize: 0 (20 crashes xfs, moved to style)
+ # 9 xresolution: 0
+ # 10 yresolution: 0
+ # 11 spacing: p(roportional)
+ # 12 averagewidth: 0
+ # 13 registry: GNU
+    # 14 encoding: fontspecific
+
+ # gives:
+ # feta20.pfa -GNU-LilyPond feta-20-r-normal--0-0-0-0-p-0-gnu-fontspecific
+
+ # However, GNOME (gtkfontsel, gnome apps) seems to want:
+
+ # 1 foundry: GNU
+ # 2 family: LilyPond
+ # 3 weight: <basename>
+ # 4 slant: r(oman) =upright
+ # 5 setwidth: normal
+ # 6 style: <designsize>
+ # 7 pixelsize: 0
+ # 8 pointsize: 0 (20 crashes xfs, moved to style)
+ # 9 xresolution: 0
+ # 10 yresolution: 0
+ # 11 spacing: p(roportional)
+ # 12 averagewidth: 0
+ # 13 registry: GNU
+    # 14 encoding: fontspecific
+
+ # which gives:
+ # feta20.pfa -GNU-LilyPond-feta-r-normal--20-0-0-0-p-0-gnu-fontspecific
+ # foundry: GNU
+
+ ## ouch, pointsize 20 crashes xfs
+ ## XXXfeta20.pfa -GNU-LilyPond Feta-regular-r-normal--0-20-0-0-p-0-gnu-fontspecific
+
+ ## feta20.pfa -GNU-LilyPond feta-regular-r-normal-20-0-0-0-0-p-0-gnu-fontspecific
+
+ afmfile = ''
+ if not afmfile:
+ #afmfile = find_file (basename + '.afm')
+ afmfile = basename + '.afm'
+
+ if afmfile:
+ afmfile = os.path.abspath (afmfile)
+ if os.path.exists (afmfile):
+ afm = read_afm_file (afmfile)
+ fontinfo = Font_info (afm.__dict__)
+ else:
+ fontinfo = Font_info (basename)
+
+ family_name = string.join (string.split (fontinfo['FamilyName'],
+ '-'), ' ')
+
+ if sodipodi_p:
+ print string.join ((os.path.abspath (filename),
+ fontinfo.FamilyName,
+ fontinfo.FamilyName,''
+ ),
+
+ ',')
+
+ elif sketch_p:
+ # Sketch's lilypond.sfd map:
+ s = string.join ([fontinfo.FontName,
+ fontinfo.family,
+ '%s %s' % (fontinfo.weight, fontinfo.style),
+ string.join (fontinfo.get_X11 ()[:4], '-'),
+ string.join (fontinfo.get_X11 ()[-2:], '-'),
+ fontinfo.name],
+ ',')
+ print s
+
+ s = string.join ([fontinfo.FamilyName + fontinfo.designsize,
+ fontinfo.family,
+ '%s %s' % (fontinfo.weight, fontinfo.style),
+ string.join (fontinfo.get_X11 ()[:4], '-'),
+ string.join (fontinfo.get_X11 ()[-2:], '-'),
+ fontinfo.name],
+ ',')
+ print s
+ else:
+ print filename + ' -' + string.join (fontinfo.get_X11 (), '-')
import time
def read_log_file (fn):
- str = open (fn).read ()
- str = re.sub ('\n', '', str)
- str = re.sub ('[\t ]+', ' ', str)
+ str = open (fn).read ()
+ str = re.sub ('\n', '', str)
+ str = re.sub ('[\t ]+', ' ', str)
- deps = []
- autolines = []
- def include_func (match, d = deps):
- d.append (match.group (1))
- return ''
+ deps = []
+ autolines = []
+ def include_func (match, d = deps):
+ d.append (match.group (1))
+ return ''
- def auto_func (match, a = autolines):
- a.append (match.group (1))
- return ''
+ def auto_func (match, a = autolines):
+ a.append (match.group (1))
+ return ''
- str = re.sub ('\(([a-zA-Z_0-9-]+\.mf)', include_func, str)
- str = re.sub ('@{(.*?)@}', auto_func, str)
+ str = re.sub ('\(([a-zA-Z_0-9-]+\.mf)', include_func, str)
+ str = re.sub ('@{(.*?)@}', auto_func, str)
- return (autolines, deps)
+ return (autolines, deps)
class Char_metric:
- def __init__ (self):
- pass
+ def __init__ (self):
+ pass
font_family = 'feta'
def parse_logfile (fn):
- (autolines, deps) = read_log_file (fn)
- charmetrics = []
-
- global_info = {
- 'filename' : os.path.splitext (os.path.basename (fn))[0]
- }
- group = ''
-
- for l in autolines:
- tags = string.split (l, '@:')
- if tags[0] == 'group':
- group = tags[1]
- elif tags[0] == 'puorg':
- group = ''
- elif tags[0] == 'char':
- name = tags[9]
-
- name = re.sub ('-', 'M', name)
- if group:
- name = group + '.' + name
- m = {
- 'description': tags[1],
- 'name': name,
- 'code': string.atoi (tags[2]),
- 'breapth': string.atof (tags[3]),
- 'width': string.atof (tags[4]),
- 'depth': string.atof (tags[5]),
- 'height': string.atof (tags[6]),
- 'wx': string.atof (tags[7]),
- 'wy': string.atof (tags[8]),
- }
- charmetrics.append (m)
- elif tags[0] == 'font':
- global font_family
- font_family = (tags[3])
- # To omit 'GNU' (foundry) from font name proper:
- # name = tags[2:]
- #urg
- if 0: # testing
- tags.append ('Regular')
-
- encoding = re.sub (' ','-', tags[5])
- tags = tags[:-1]
- name = tags[1:]
- global_info['design_size'] = string.atof (tags[4])
- global_info['FontName'] = string.join (name, '-')
- global_info['FullName'] = string.join (name,' ')
- global_info['FamilyName'] = string.join (name[1:-1],
- '-')
- if 1:
- global_info['Weight'] = tags[4]
- else: # testing
- global_info['Weight'] = tags[-1]
-
- global_info['FontBBox'] = '0 0 1000 1000'
- global_info['Ascender'] = '0'
- global_info['Descender'] = '0'
- global_info['EncodingScheme'] = encoding
-
- elif tags[0] == 'parameter':
- global_info[tags[1]] = tags[2];
-
- return (global_info, charmetrics, deps)
+ (autolines, deps) = read_log_file (fn)
+ charmetrics = []
+
+ global_info = {
+ 'filename' : os.path.splitext (os.path.basename (fn))[0]
+ }
+ group = ''
+
+ for l in autolines:
+ tags = string.split (l, '@:')
+ if tags[0] == 'group':
+ group = tags[1]
+ elif tags[0] == 'puorg':
+ group = ''
+ elif tags[0] == 'char':
+ name = tags[9]
+
+ name = re.sub ('-', 'M', name)
+ if group:
+ name = group + '.' + name
+ m = {
+ 'description': tags[1],
+ 'name': name,
+ 'code': string.atoi (tags[2]),
+ 'breapth': string.atof (tags[3]),
+ 'width': string.atof (tags[4]),
+ 'depth': string.atof (tags[5]),
+ 'height': string.atof (tags[6]),
+ 'wx': string.atof (tags[7]),
+ 'wy': string.atof (tags[8]),
+ }
+ charmetrics.append (m)
+ elif tags[0] == 'font':
+ global font_family
+ font_family = (tags[3])
+ # To omit 'GNU' (foundry) from font name proper:
+ # name = tags[2:]
+ #urg
+ if 0: # testing
+ tags.append ('Regular')
+
+ encoding = re.sub (' ','-', tags[5])
+ tags = tags[:-1]
+ name = tags[1:]
+ global_info['design_size'] = string.atof (tags[4])
+ global_info['FontName'] = string.join (name, '-')
+ global_info['FullName'] = string.join (name,' ')
+ global_info['FamilyName'] = string.join (name[1:-1],
+ '-')
+ if 1:
+ global_info['Weight'] = tags[4]
+ else: # testing
+ global_info['Weight'] = tags[-1]
+
+ global_info['FontBBox'] = '0 0 1000 1000'
+ global_info['Ascender'] = '0'
+ global_info['Descender'] = '0'
+ global_info['EncodingScheme'] = encoding
+
+ elif tags[0] == 'parameter':
+ global_info[tags[1]] = tags[2];
+
+ return (global_info, charmetrics, deps)
def write_tex_defs (file, global_info, charmetrics):
- nm = font_family
- for m in charmetrics:
-
- texname = re.sub ('[_.]', 'X', m['name'])
- def digit_to_letter (match):
- return chr (ord (match.group(1)) - ord ('0') + ord ('A'))
- texname = re.sub ('([0-9])', digit_to_letter, texname)
- file.write (r'''\gdef\%s%s{\char%d}%%%s''' % \
- (nm, texname, m['code'],'\n'))
- file.write ('\\endinput\n')
+ nm = font_family
+ for m in charmetrics:
+
+ texname = re.sub ('[_.]', 'X', m['name'])
+ def digit_to_letter (match):
+ return chr (ord (match.group(1)) - ord ('0') + ord ('A'))
+ texname = re.sub ('([0-9])', digit_to_letter, texname)
+ file.write (r'''\gdef\%s%s{\char%d}%%%s''' % \
+ (nm, texname, m['code'],'\n'))
+ file.write ('\\endinput\n')
def write_character_lisp_table (file, global_info, charmetrics):
- def conv_char_metric (charmetric):
- f = 1.0
- s = """(%s .
+ def conv_char_metric (charmetric):
+ f = 1.0
+ s = """(%s .
((bbox . (%f %f %f %f))
- (subfont . "%s")
- (subfont-index . %d)
- (attachment . (%f . %f))))
+(subfont . "%s")
+(subfont-index . %d)
+(attachment . (%f . %f))))
""" %(charmetric['name'],
- -charmetric['breapth'] * f,
- -charmetric['depth'] * f,
- charmetric['width'] * f,
- charmetric['height'] * f,
- global_info['filename'],
- charmetric['code'],
- charmetric['wx'],
- charmetric['wy'])
+ -charmetric['breapth'] * f,
+ -charmetric['depth'] * f,
+ charmetric['width'] * f,
+ charmetric['height'] * f,
+ global_info['filename'],
+ charmetric['code'],
+ charmetric['wx'],
+ charmetric['wy'])
- return s
+ return s
- for c in charmetrics:
- file.write (conv_char_metric (c))
+ for c in charmetrics:
+ file.write (conv_char_metric (c))
def write_global_lisp_table (file, global_info):
- str = ''
+ str = ''
- keys = ['staffsize', 'stafflinethickness', 'staff_space',
- 'linethickness', 'black_notehead_width', 'ledgerlinethickness',
- 'design_size',
- 'blot_diameter'
- ]
- for k in keys:
- if global_info.has_key (k):
- str = str + "(%s . %s)\n" % (k,global_info[k])
+ keys = ['staffsize', 'stafflinethickness', 'staff_space',
+ 'linethickness', 'black_notehead_width', 'ledgerlinethickness',
+ 'design_size',
+ 'blot_diameter'
+ ]
+ for k in keys:
+ if global_info.has_key (k):
+ str = str + "(%s . %s)\n" % (k,global_info[k])
- file.write (str)
+ file.write (str)
-
+
def write_ps_encoding (name, file, global_info, charmetrics):
- encs = ['.notdef'] * 256
- for m in charmetrics:
- encs[m['code']] = m['name']
+ encs = ['.notdef'] * 256
+ for m in charmetrics:
+ encs[m['code']] = m['name']
- file.write ('/%s [\n' % name)
- for m in range (0, 256):
- file.write (' /%s %% %d\n' % (encs[m], m))
- file.write ('] def\n')
+ file.write ('/%s [\n' % name)
+ for m in range (0, 256):
+ file.write (' /%s %% %d\n' % (encs[m], m))
+ file.write ('] def\n')
def write_deps (file, deps, targets):
- for t in targets:
- t = re.sub ( '^\\./', '', t)
- file.write ('%s '% t)
- file.write (": ")
- for d in deps:
- file.write ('%s ' % d)
- file.write ('\n')
+ for t in targets:
+ t = re.sub ( '^\\./', '', t)
+ file.write ('%s '% t)
+ file.write (": ")
+ for d in deps:
+ file.write ('%s ' % d)
+ file.write ('\n')
def help ():
- sys.stdout.write(r"""Usage: mf-to-table [OPTIONS] LOGFILEs
+ sys.stdout.write(r"""Usage: mf-to-table [OPTIONS] LOGFILEs
Generate feta metrics table from preparated feta log.
Options:
- -d, --dep=FILE print dependency info to FILE
- -h, --help print this help
- -l, --ly=FILE name output table
- -o, --outdir=DIR prefix for dependency info
- -p, --package=DIR specify package
- -t, --tex=FILE name output tex chardefs
+ -d, --dep=FILE print dependency info to FILE
+ -h, --help print this help
+ -l, --ly=FILE name output table
+ -o, --outdir=DIR prefix for dependency info
+ -p, --package=DIR specify package
+ -t, --tex=FILE name output tex chardefs
- """)
- sys.exit (0)
+ """)
+ sys.exit (0)
(options, files) = \
- getopt.getopt (sys.argv[1:],
- 'a:d:ho:p:t:',
- ['enc=', 'outdir=', 'dep=', 'lisp=',
- 'global-lisp=',
- 'tex=', 'debug', 'help', 'package='])
+ getopt.getopt (sys.argv[1:],
+ 'a:d:ho:p:t:',
+ ['enc=', 'outdir=', 'dep=', 'lisp=',
+ 'global-lisp=',
+ 'tex=', 'debug', 'help', 'package='])
global_lisp_nm = ''
char_lisp_nm = ''
outdir_prefix = '.'
for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--dep' or o == '-d':
- depfile_nm = a
- elif o == '--outdir' or o == '-o':
- outdir_prefix = a
- elif o == '--tex' or o == '-t':
- texfile_nm = a
- elif o == '--lisp':
- char_lisp_nm = a
- elif o == '--global-lisp':
- global_lisp_nm = a
- elif o == '--enc':
- enc_nm = a
- elif o== '--help' or o == '-h':
- help()
- elif o == '--debug':
- debug_b = 1
- else:
- print o
- raise getopt.error
+ o = opt[0]
+ a = opt[1]
+ if o == '--dep' or o == '-d':
+ depfile_nm = a
+ elif o == '--outdir' or o == '-o':
+ outdir_prefix = a
+ elif o == '--tex' or o == '-t':
+ texfile_nm = a
+ elif o == '--lisp':
+ char_lisp_nm = a
+ elif o == '--global-lisp':
+ global_lisp_nm = a
+ elif o == '--enc':
+ enc_nm = a
+ elif o== '--help' or o == '-h':
+ help()
+ elif o == '--debug':
+ debug_b = 1
+ else:
+ print o
+ raise getopt.error
base = re.sub ('.tex$', '', texfile_nm)
for filenm in files:
- (g, m, deps) = parse_logfile (filenm)
-
- write_tex_defs (open (texfile_nm, 'w'), g, m)
- enc_name = 'FetaEncoding'
- if re.search ('parmesan', filenm):
- enc_name = 'ParmesanEncoding'
- elif re.search ('feta-brace', filenm):
- enc_name = 'FetaBraceEncoding'
- elif re.search ('feta-alphabet', filenm):
- enc_name = 'FetaAlphabetEncoding';
-
- write_ps_encoding (enc_name, open (enc_nm, 'w'), g, m)
- write_character_lisp_table (open (char_lisp_nm, 'w'), g, m)
- write_global_lisp_table (open (global_lisp_nm, 'w'), g)
- if depfile_nm:
- write_deps (open (depfile_nm, 'wb'), deps,
- [base + '.log', base + '.dvi', base + '.pfa',
- base + '.pfb', texfile_nm])
+ (g, m, deps) = parse_logfile (filenm)
+
+ write_tex_defs (open (texfile_nm, 'w'), g, m)
+ enc_name = 'FetaEncoding'
+ if re.search ('parmesan', filenm):
+ enc_name = 'ParmesanEncoding'
+ elif re.search ('feta-brace', filenm):
+ enc_name = 'FetaBraceEncoding'
+ elif re.search ('feta-alphabet', filenm):
+ enc_name = 'FetaAlphabetEncoding';
+
+ write_ps_encoding (enc_name, open (enc_nm, 'w'), g, m)
+ write_character_lisp_table (open (char_lisp_nm, 'w'), g, m)
+ write_global_lisp_table (open (global_lisp_nm, 'w'), g)
+ if depfile_nm:
+ write_deps (open (depfile_nm, 'wb'), deps,
+ [base + '.log', base + '.dvi', base + '.pfa',
+ base + '.pfb', texfile_nm])
import sys
def find (pat, dir):
- f = os.popen ('find %s -name "%s"'% (dir, pat))
- lst = []
- for a in f.readlines():
- a = a[:-1]
- lst.append (a)
- return lst
+ f = os.popen ('find %s -name "%s"'% (dir, pat))
+ lst = []
+ for a in f.readlines():
+ a = a[:-1]
+ lst.append (a)
+ return lst
junk_prefix = 'out-www/'
headertext= r"""
<h1>LilyPond samples</h1>
-
+
<p>You're looking at a page with some LilyPond samples. These files
are also included in the distribution. The output is completely
# FIXME breaks on multiple strings.
#
def read_lilypond_header (fn):
- s = open (fn).read ()
- s = re.sub ('%.*$', '', s)
- s = re.sub ('\n', ' ', s)
+ s = open (fn).read ()
+ s = re.sub ('%.*$', '', s)
+ s = re.sub ('\n', ' ', s)
- dict = {}
- m = re.search (r"""\\header\s*{([^}]*)}""", s)
+ dict = {}
+ m = re.search (r"""\\header\s*{([^}]*)}""", s)
- if m:
- s = m.group (1)
- else:
- return dict
+ if m:
+ s = m.group (1)
+ else:
+ return dict
- while s:
- m = re.search (r'''\s*(\S+)\s*=\s*"([^"]+)"''', s)
- if m == None:
- s = ''
- else:
- s = s[m.end (0):]
- left = m.group (1)
- right = m.group (2)
+ while s:
+ m = re.search (r'''\s*(\S+)\s*=\s*"([^"]+)"''', s)
+ if m == None:
+ s = ''
+ else:
+ s = s[m.end (0):]
+ left = m.group (1)
+ right = m.group (2)
- left = re.sub ('"', '', left)
- right = re.sub ('"', '', right)
- dict[left] = right
+ left = re.sub ('"', '', left)
+ right = re.sub ('"', '', right)
+ dict[left] = right
- return dict
+ return dict
def help ():
- sys.stdout.write (r'''Usage: mutopia-index [OPTIONS] INFILE OUTFILE
+ sys.stdout.write (r'''Usage: mutopia-index [OPTIONS] INFILE OUTFILE
Generate index for mutopia.
Options:
- -h, --help print this help
- -o, --output=FILE write output to file
- -s, --subdirs=DIR add subdir
- --suffix=SUF specify suffix
-
+ -h, --help print this help
+ -o, --output=FILE write output to file
+ -s, --subdirs=DIR add subdir
+ --suffix=SUF specify suffix
+
''')
- sys.exit (0)
+ sys.exit (0)
# ugh.
def gen_list (inputs, file_name):
- sys.stderr.write ("generating HTML list %s" % file_name)
- sys.stderr.write ('\n')
- if file_name:
- list = open (file_name, 'w')
- else:
- list = sys.stdout
- list.write ('''<html><head><title>Rendered Examples</title>
+ sys.stderr.write ("generating HTML list %s" % file_name)
+ sys.stderr.write ('\n')
+ if file_name:
+ list = open (file_name, 'w')
+ else:
+ list = sys.stdout
+ list.write ('''<html><head><title>Rendered Examples</title>
<style type="text/css">
hr { border:0; height:1; color: #000000; background-color: #000000; }\n
</style>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
</head>''')
- list.write ('<body bgcolor=white>\n')
-
- if inputs:
- list.write (headertext)
- else:
- list.write (headertext_nopics)
-
- for ex in inputs:
- print ex
-
- (base, ext) = os.path.splitext (ex)
- (base, ext2) = os.path.splitext (base)
- ext = ext2 + ext
-
- header = read_lilypond_header (ex)
- def read_dict (s, default, h = header):
- try:
- ret = h[s]
- except KeyError:
- ret = default
- return ret
- head = read_dict ('title', os.path.basename (base))
- composer = read_dict ('composer', '')
- desc = read_dict ('description', '')
- list.write ('<hr>\n')
- list.write ('<h1>%s</h1>\n' % head);
- if composer:
- list.write ('<h2>%s</h2>\n' % composer)
- if desc:
- list.write ('%s<p>' % desc)
- list.write ('<ul>\n')
-
- def list_item (file_name, desc, type, lst = list):
- if os.path.isfile (file_name):
- lst.write ('<li><a href="%s">%s</a>'
- % (re.sub (junk_prefix, '', file_name), desc))
-
- # FIXME: include warning if it uses \include
- # files.
-
- size = os.stat (file_name)[stat.ST_SIZE]
- kB = (size + 512) / 1024
- if kB:
- lst.write (' (%s %d kB)' % (type, kB))
- else:
- lst.write (' (%s %d characters)'
- % (type, size))
- pictures = ['jpeg', 'png', 'xpm']
- lst.write ('\n')
- else:
- print "can't find" , `file_name`
-
- list_item (base + ext, 'The input', 'ASCII')
-
- pages_found = 0
- for page in range (1, 100):
- f = base + '-page%d.png' % page
-
- if not os.path.isfile (f):
- break
- pages_found += 1
- list_item (f, 'See a picture of page %d' % page, 'png')
-
- if pages_found == 0 and os.path.exists (base + '.png'):
- list_item (base + ".png",
- 'See a picture', 'png')
-
-
- list_item (base + '.pdf', 'Print', 'PDF')
- list_item (base + '.midi', 'Listen', 'MIDI')
- list.write ('</ul>\n');
-
- list.write ('</body></html>\n');
- list.close ()
+ list.write ('<body bgcolor=white>\n')
+
+ if inputs:
+ list.write (headertext)
+ else:
+ list.write (headertext_nopics)
+
+ for ex in inputs:
+ print ex
+
+ (base, ext) = os.path.splitext (ex)
+ (base, ext2) = os.path.splitext (base)
+ ext = ext2 + ext
+
+ header = read_lilypond_header (ex)
+ def read_dict (s, default, h = header):
+ try:
+ ret = h[s]
+ except KeyError:
+ ret = default
+ return ret
+ head = read_dict ('title', os.path.basename (base))
+ composer = read_dict ('composer', '')
+ desc = read_dict ('description', '')
+ list.write ('<hr>\n')
+ list.write ('<h1>%s</h1>\n' % head);
+ if composer:
+ list.write ('<h2>%s</h2>\n' % composer)
+ if desc:
+ list.write ('%s<p>' % desc)
+ list.write ('<ul>\n')
+
+ def list_item (file_name, desc, type, lst = list):
+ if os.path.isfile (file_name):
+ lst.write ('<li><a href="%s">%s</a>'
+ % (re.sub (junk_prefix, '', file_name), desc))
+
+ # FIXME: include warning if it uses \include
+ # files.
+
+ size = os.stat (file_name)[stat.ST_SIZE]
+ kB = (size + 512) / 1024
+ if kB:
+ lst.write (' (%s %d kB)' % (type, kB))
+ else:
+ lst.write (' (%s %d characters)'
+ % (type, size))
+ pictures = ['jpeg', 'png', 'xpm']
+ lst.write ('\n')
+ else:
+ print "can't find" , `file_name`
+
+ list_item (base + ext, 'The input', 'ASCII')
+
+ pages_found = 0
+ for page in range (1, 100):
+ f = base + '-page%d.png' % page
+
+ if not os.path.isfile (f):
+ break
+ pages_found += 1
+ list_item (f, 'See a picture of page %d' % page, 'png')
+
+ if pages_found == 0 and os.path.exists (base + '.png'):
+ list_item (base + ".png",
+ 'See a picture', 'png')
+
+
+ list_item (base + '.pdf', 'Print', 'PDF')
+ list_item (base + '.midi', 'Listen', 'MIDI')
+ list.write ('</ul>\n');
+
+ list.write ('</body></html>\n');
+ list.close ()
(options, files) = getopt.getopt (sys.argv[1:],
- 'ho:', ['help', 'output='])
+ 'ho:', ['help', 'output='])
outfile = 'examples.html'
subdirs = []
for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--help' or o == '-h':
- help ()
- elif o == '--output' or o == '-o':
- outfile = a
+ o = opt[0]
+ a = opt[1]
+ if o == '--help' or o == '-h':
+ help ()
+ elif o == '--output' or o == '-o':
+ outfile = a
dirs = []
for f in files:
- dirs = dirs + find ('out-www', f)
+ dirs = dirs + find ('out-www', f)
if not dirs:
- dirs = ['.']
+ dirs = ['.']
allfiles = []
for d in dirs:
- allfiles = allfiles + find ('*.ly.txt', d)
+ allfiles = allfiles + find ('*.ly.txt', d)
gen_list (allfiles, outfile)
Remove (ont-) LilyPond specific kind of HunGA(a)Rian notation.
- ontgaar $(find flower lily python -name '*cc' -o -name '*hh' -o -name '*ll' -o -name '*yy')
+ ontgaar $(find flower lily python -name '*cc' -o -name '*hh' -o -name '*ll' -o -name '*yy')
- for i in $(find flower lily python -name '*.ontgaar'); do diff -u $(dirname $i)/$(basename $i .ontgaar) $i; done > pats
+ for i in $(find flower lily python -name '*.ontgaar'); do diff -u $(dirname $i)/$(basename $i .ontgaar) $i; done > pats
Mandatory suffixes:
- _ : _ member var
+ _ : _ member var
Optional suffixes:
- _b : bool
- _p : as in lispy pair_p ()?
- _x : x-coor
- _y : y-coor
-
- _byte :
- _char :
- _count : counter
- _drul : Drul_array
- _global : global var
- _grob : Grob
- _mom : moment
- _req : Request
- _scm : SCM
- _str : C string
- _str0 : C string
- _string : C++ string
+ _b : bool
+ _p : as in lispy pair_p ()?
+ _x : x-coor
+ _y : y-coor
+
+ _byte :
+ _char :
+ _count : counter
+ _drul : Drul_array
+ _global : global var
+ _grob : Grob
+ _mom : moment
+ _req : Request
+ _scm : SCM
+ _str : C string
+ _str0 : C string
+ _string : C++ string
Prefixes:
- get_ :
- gh_ : Do not use: part of deprecated Guile api
- ly_ :
- scm_ :
+ get_ :
+ gh_ : Do not use: part of deprecated Guile api
+ ly_ :
+ scm_ :
The Grand Ontgaaring (some may remain):
- _str -> _string
- _ch -> _str0
- _ch_C -> _str0
- _sz -> _str0
-
- _ch -> _char
-
- _C : junk
- _c : junk
- _f : junk
- _i : junk
- _l : junk
- _p : junk, except for lispy is_foo_p ()
- _arr : junk
- _array : junk
+ _str -> _string
+ _ch -> _str0
+ _ch_C -> _str0
+ _sz -> _str0
+
+ _ch -> _char
+
+ _C : junk
+ _c : junk
+ _f : junk
+ _i : junk
+ _l : junk
+ _p : junk, except for lispy is_foo_p ()
+ _arr : junk
+ _array : junk
'''
-
+
import re
import sys
files = sys.argv[1:]
for f in files:
- print f
- s = open (f).read ()
-
- # shield stuff
- s = re.sub (r'equal_p', r'equal_pX', s)
- s = re.sub (r'less_p', r'less_pX', s)
- s = re.sub (r'pair_p', r'pair_pX', s)
- s = re.sub (r'smob_p', r'smob_pX', s)
- s = re.sub (r'list_p(\W)', r'list_pX\1', s)
-
- s = re.sub (r'(gh_\w*_(p|str)) *\(', r'\1X (', s)
- s = re.sub (r'(ly_\w*_(p|str)) *\(', r'\1X (', s)
- s = re.sub (r'(scm_\w*_(p|str)) *\(', r'\1X (', s)
-
- s = re.sub (r'to_c(\W)', r'to_cX\1', s)
- s = re.sub (r'P_C', r'P_XC', s)
-
- s = re.sub (r'(\W)get_music(\W)', r'\1Yget_pending_events\2', s)
-
- s = re.sub (r'2_i(\W)', r'2int\1', s)
- s = re.sub (r'2_u(\W)', r'2unsigned\1', s)
- s = re.sub (r'2_f(\W)', r'2double\1', s)
-
- s = re.sub (r'(\w+)_str *\(', r'\1_string (', s)
-
-
- # member vars or multipart names
- s = re.sub (r'(\w+)_(c|f|i|l|p)_(\W)', r'\1_\3', s)
- s = re.sub (r'(\w+)_(c|f|i|l|p)_arr(_|\W)', r'\1_array\3', s)
- s = re.sub (r'(\w+)_arr_', r'\1_array_', s)
- s = re.sub (r'(\w+)_str_', r'\1_string_', s)
- s = re.sub (r'(\w+)_sz', r'\1_str0', s)
-
- # functions
- s = re.sub (r'(\W)ch_C *\(', r'\1Yto_str0 (', s)
- s = re.sub (r'(\W)byte_C *\(', r'\1Yto_bytes (', s)
- s = re.sub (r'(\W)byte_l *\(', r'\1Yget_bytes (', s)
- s = re.sub (r'(\W)value_i *\(', r'\1Yto_int (', s)
- s = re.sub (r'(\W)value_f *\(', r'\1Yto_double (', s)
- s = re.sub (r'find_i *\(', r'Yfind_index (', s)
- s = re.sub (r'compare_i *\)', r'compare)', s)
-
-
- s = re.sub (r'(\w+)_(c|f|i|l|p) *\(', r'Yget_\1 (', s)
-
- s = re.sub (r'(\w+)_arr *\(', r'\1_array (', s)
- s = re.sub (r'(\w+)_ch *\(', r'\1_str0 (', s)
- s = re.sub (r'(\w+)_str *\(', r'\1_string (', s)
-
- s = re.sub (r'(\W)str *\(', r'\1string (', s)
- s = re.sub (r'(\W)arr *\(', r'\1array (', s)
-
- s = re.sub (r'(\w+)_ch_C *\(', r'\1_str0 (', s)
- s = re.sub (r'(\w+)_ch *\(', r'\1_str0 (', s)
-
- # more member vars or multipart names
- s = re.sub (r'(\w+)_ch_C', r'\1_str0', s)
- s = re.sub (r'(\w+)_ch_', r'\1_char_', s)
- s = re.sub (r'(\W)ch_C(\W)', r'\1str0\2', s)
-
- # plain vars -- can't do, as we have
- # Real foo_f, int foo_i, SCM foo constructs
- # s = re.sub (r'(\w+)_(c|f|i|l|p)(\W)', r'\1_\3', s)
-
-
- # but these will go
- s = re.sub (r'(\W)arr_(l|p)(\W)', r'\1array\3', s)
- s = re.sub (r'new_(l|p)', r'new_pX', s)
- s = re.sub (r'(\w+)_(l|p)(\W)', r'\1\3', s)
-
- s = re.sub (r'(\w+)_arr(\W)', r'\1_array\2', s)
- s = re.sub (r'(\w+)_str(\W)', r'\1_string\2', s)
-
- s = re.sub (r'(\w+)_ch_C(\W)', r'\1_str0\2', s)
- s = re.sub (r'(\w+)_ch(\W)', r'\1_char\2', s)
-
- s = re.sub (r'(\w+)_C(\W)', r'\1\2', s)
-
- # fixups
- s = re.sub (r'Yfind', 'find', s)
- s = re.sub (r'Yget_argument_to', 'get_argument_index', s)
- s = re.sub (r'Yget_compare', 'compare', s)
- s = re.sub (r'Yget_cons', 'cons', s)
- s = re.sub (r'Yget_create', 'create', s)
- s = re.sub (r'Yget_find', 'find', s)
- s = re.sub (r'Yget_hex', 'hex', s)
- s = re.sub (r'Yget_index', 'index', s)
- s = re.sub (r'Yget_length', 'length', s)
- s = re.sub (r'Yget_remove', 'remove', s)
- s = re.sub (r'Yget_report', 'report', s)
- s = re.sub (r'Yget_size', 'size', s)
- s = re.sub (r'Yget_get', 'get', s)
- s = re.sub (r'Yget', 'get', s)
- s = re.sub (r'Yto', 'to', s)
-
-
- s = re.sub (r'(bin2dec|bin2hex|dec2bin|hex2bin)_string', r'\1', s)
- s = re.sub (r'i2hex_string', 'int2hex', s)
- s = re.sub (r'u2hex_string', 'unsigned2hex', s)
- s = re.sub (r'i2dec_string', 'int2dec', s)
-
- # Would this work?
- s = re.sub (r'split_array', 'split', s)
- s = re.sub (r'custos_array', 'custodes', s)
- s = re.sub (r'primitives_array', 'primitives', s)
- s = re.sub (r'span_array', 'spanners', s)
- s = re.sub (r'(Pointer|Link|Drul|get|heap|_of|remove)_array',
- r'\1_Xarray', s)
- s = re.sub (r'([a-rt-zA-RT-Z])_array', r'\1s', s)
- s = re.sub (r'([sS])_array', r'\1es', s)
- s = re.sub (r'_Xarray', '_array', s)
-
- # shields down
- s = re.sub (r'_pX', r'_p', s)
- s = re.sub (r'_cX', r'_c', s)
- s = re.sub (r'_strX', r'_str', s)
- s = re.sub (r'P_XC', 'P_C', s)
- s = re.sub (r'Xget_music', 'get_music', s)
-
- h = open (f + '.ontgaar', 'w')
- h.write (s)
- h.close ()
-
+ print f
+ s = open (f).read ()
+
+ # shield stuff
+ s = re.sub (r'equal_p', r'equal_pX', s)
+ s = re.sub (r'less_p', r'less_pX', s)
+ s = re.sub (r'pair_p', r'pair_pX', s)
+ s = re.sub (r'smob_p', r'smob_pX', s)
+ s = re.sub (r'list_p(\W)', r'list_pX\1', s)
+
+ s = re.sub (r'(gh_\w*_(p|str)) *\(', r'\1X (', s)
+ s = re.sub (r'(ly_\w*_(p|str)) *\(', r'\1X (', s)
+ s = re.sub (r'(scm_\w*_(p|str)) *\(', r'\1X (', s)
+
+ s = re.sub (r'to_c(\W)', r'to_cX\1', s)
+ s = re.sub (r'P_C', r'P_XC', s)
+
+ s = re.sub (r'(\W)get_music(\W)', r'\1Yget_pending_events\2', s)
+
+ s = re.sub (r'2_i(\W)', r'2int\1', s)
+ s = re.sub (r'2_u(\W)', r'2unsigned\1', s)
+ s = re.sub (r'2_f(\W)', r'2double\1', s)
+
+ s = re.sub (r'(\w+)_str *\(', r'\1_string (', s)
+
+
+ # member vars or multipart names
+ s = re.sub (r'(\w+)_(c|f|i|l|p)_(\W)', r'\1_\3', s)
+ s = re.sub (r'(\w+)_(c|f|i|l|p)_arr(_|\W)', r'\1_array\3', s)
+ s = re.sub (r'(\w+)_arr_', r'\1_array_', s)
+ s = re.sub (r'(\w+)_str_', r'\1_string_', s)
+ s = re.sub (r'(\w+)_sz', r'\1_str0', s)
+
+ # functions
+ s = re.sub (r'(\W)ch_C *\(', r'\1Yto_str0 (', s)
+ s = re.sub (r'(\W)byte_C *\(', r'\1Yto_bytes (', s)
+ s = re.sub (r'(\W)byte_l *\(', r'\1Yget_bytes (', s)
+ s = re.sub (r'(\W)value_i *\(', r'\1Yto_int (', s)
+ s = re.sub (r'(\W)value_f *\(', r'\1Yto_double (', s)
+ s = re.sub (r'find_i *\(', r'Yfind_index (', s)
+ s = re.sub (r'compare_i *\)', r'compare)', s)
+
+
+ s = re.sub (r'(\w+)_(c|f|i|l|p) *\(', r'Yget_\1 (', s)
+
+ s = re.sub (r'(\w+)_arr *\(', r'\1_array (', s)
+ s = re.sub (r'(\w+)_ch *\(', r'\1_str0 (', s)
+ s = re.sub (r'(\w+)_str *\(', r'\1_string (', s)
+
+ s = re.sub (r'(\W)str *\(', r'\1string (', s)
+ s = re.sub (r'(\W)arr *\(', r'\1array (', s)
+
+ s = re.sub (r'(\w+)_ch_C *\(', r'\1_str0 (', s)
+ s = re.sub (r'(\w+)_ch *\(', r'\1_str0 (', s)
+
+ # more member vars or multipart names
+ s = re.sub (r'(\w+)_ch_C', r'\1_str0', s)
+ s = re.sub (r'(\w+)_ch_', r'\1_char_', s)
+ s = re.sub (r'(\W)ch_C(\W)', r'\1str0\2', s)
+
+ # plain vars -- can't do, as we have
+ # Real foo_f, int foo_i, SCM foo constructs
+ # s = re.sub (r'(\w+)_(c|f|i|l|p)(\W)', r'\1_\3', s)
+
+
+ # but these will go
+ s = re.sub (r'(\W)arr_(l|p)(\W)', r'\1array\3', s)
+ s = re.sub (r'new_(l|p)', r'new_pX', s)
+ s = re.sub (r'(\w+)_(l|p)(\W)', r'\1\3', s)
+
+ s = re.sub (r'(\w+)_arr(\W)', r'\1_array\2', s)
+ s = re.sub (r'(\w+)_str(\W)', r'\1_string\2', s)
+
+ s = re.sub (r'(\w+)_ch_C(\W)', r'\1_str0\2', s)
+ s = re.sub (r'(\w+)_ch(\W)', r'\1_char\2', s)
+
+ s = re.sub (r'(\w+)_C(\W)', r'\1\2', s)
+
+ # fixups
+ s = re.sub (r'Yfind', 'find', s)
+ s = re.sub (r'Yget_argument_to', 'get_argument_index', s)
+ s = re.sub (r'Yget_compare', 'compare', s)
+ s = re.sub (r'Yget_cons', 'cons', s)
+ s = re.sub (r'Yget_create', 'create', s)
+ s = re.sub (r'Yget_find', 'find', s)
+ s = re.sub (r'Yget_hex', 'hex', s)
+ s = re.sub (r'Yget_index', 'index', s)
+ s = re.sub (r'Yget_length', 'length', s)
+ s = re.sub (r'Yget_remove', 'remove', s)
+ s = re.sub (r'Yget_report', 'report', s)
+ s = re.sub (r'Yget_size', 'size', s)
+ s = re.sub (r'Yget_get', 'get', s)
+ s = re.sub (r'Yget', 'get', s)
+ s = re.sub (r'Yto', 'to', s)
+
+
+ s = re.sub (r'(bin2dec|bin2hex|dec2bin|hex2bin)_string', r'\1', s)
+ s = re.sub (r'i2hex_string', 'int2hex', s)
+ s = re.sub (r'u2hex_string', 'unsigned2hex', s)
+ s = re.sub (r'i2dec_string', 'int2dec', s)
+
+ # Would this work?
+ s = re.sub (r'split_array', 'split', s)
+ s = re.sub (r'custos_array', 'custodes', s)
+ s = re.sub (r'primitives_array', 'primitives', s)
+ s = re.sub (r'span_array', 'spanners', s)
+ s = re.sub (r'(Pointer|Link|Drul|get|heap|_of|remove)_array',
+ r'\1_Xarray', s)
+ s = re.sub (r'([a-rt-zA-RT-Z])_array', r'\1s', s)
+ s = re.sub (r'([sS])_array', r'\1es', s)
+ s = re.sub (r'_Xarray', '_array', s)
+
+ # shields down
+ s = re.sub (r'_pX', r'_p', s)
+ s = re.sub (r'_cX', r'_c', s)
+ s = re.sub (r'_strX', r'_str', s)
+ s = re.sub (r'P_XC', 'P_C', s)
+ s = re.sub (r'Xget_music', 'get_music', s)
+
+ h = open (f + '.ontgaar', 'w')
+ h.write (s)
+ h.close ()
+
import time
def usage ():
- sys.stderr.write ('''
+ sys.stderr.write ('''
texi2omf [options] FILE.texi > FILE.omf
Options:
''')
-
+
(options, files) = getopt.getopt (sys.argv[1:], '',
- ['format=', 'location=', 'version='])
+ ['format=', 'location=', 'version='])
license = 'FDL'
location = ''
format = 'xml'
for (o, a) in options:
- if o == '--format':
- format = a
- elif o == '--location':
- location = 'file:%s' % a
- elif o == '--version':
- version = a
- else:
- assert 0
-
-
+ if o == '--format':
+ format = a
+ elif o == '--location':
+ location = 'file:%s' % a
+ elif o == '--version':
+ version = a
+ else:
+ assert 0
+
+
if not files:
- usage ()
- sys.exit (2)
+ usage ()
+ sys.exit (2)
formats = {
- 'html' : 'text/html',
- 'pdf' : 'application/pdf',
- 'ps.gz' : 'application/postscript',
- 'ps' : 'application/postscript',
- 'xml' : 'text/xml',
- }
+ 'html' : 'text/html',
+ 'pdf' : 'application/pdf',
+ 'ps.gz' : 'application/postscript',
+ 'ps' : 'application/postscript',
+ 'xml' : 'text/xml',
+ }
if not formats.has_key (format):
- sys.stderr.write ("Format `%s' unknown\n" % format)
- sys.exit (1)
+ sys.stderr.write ("Format `%s' unknown\n" % format)
+ sys.exit (1)
infile = files[0]
texi = open (infile).read ()
if not location:
- location = 'file:/%s' % re.sub (r'\..*', '.' + format, infile)
+ location = 'file:/%s' % re.sub (r'\..*', '.' + format, infile)
omf_vars = {
- 'date': '%d-%d-%d' % today[:3],
- 'mimeformat': formats[format],
- 'maintainer': "%s (%s)" % (name, email),
- 'version' : version,
- 'location' : location,
- 'language' : 'C',
- }
+ 'date': '%d-%d-%d' % today[:3],
+ 'mimeformat': formats[format],
+ 'maintainer': "%s (%s)" % (name, email),
+ 'version' : version,
+ 'location' : location,
+ 'language' : 'C',
+ }
omf_caterories = ['subject', 'creator', 'maintainer', 'contributor',
- 'title', 'subtitle', 'version', 'category', 'type',
- 'description', 'license', 'language',]
-
+ 'title', 'subtitle', 'version', 'category', 'type',
+ 'description', 'license', 'language',]
+
for a in omf_caterories:
- m = re.search ('@omf%s (.*)\n'% a, texi)
- if m:
- omf_vars[a] = m.group (1)
- elif not omf_vars.has_key (a):
- omf_vars[a] = ''
-
+ m = re.search ('@omf%s (.*)\n'% a, texi)
+ if m:
+ omf_vars[a] = m.group (1)
+ elif not omf_vars.has_key (a):
+ omf_vars[a] = ''
+
if not omf_vars['title']:
- title = ''
- m = re.search ('@title (.*)\n', texi)
- if m:
- title = m.group (1)
+ title = ''
+ m = re.search ('@title (.*)\n', texi)
+ if m:
+ title = m.group (1)
- subtitle = ''
- m = re.search ('@subtitle (.*)\n', texi)
- if m:
- subtitle = m.group (1)
+ subtitle = ''
+ m = re.search ('@subtitle (.*)\n', texi)
+ if m:
+ subtitle = m.group (1)
- if subtitle:
- title = '%s -- %s' % (title, subtitle)
+ if subtitle:
+ title = '%s -- %s' % (title, subtitle)
- omf_vars['title'] = title
-
+ omf_vars['title'] = title
+
if not omf_vars['creator']:
- m = re.search ('@author (.*)\n', texi)
- if m:
- omf_vars['creator'] = m.group (1)
+ m = re.search ('@author (.*)\n', texi)
+ if m:
+ omf_vars['creator'] = m.group (1)
print r'''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE omf PUBLIC "-//OMF//DTD Scrollkeeper OMF Variant V1.0//EN" "http://scrollkeeper.sourceforge.net/dtds/scrollkeeper-omf-1.0/scrollkeeper-omf.dtd">
<omf>
- <resource>
- <creator>
- %(creator)s
- </creator>
- <maintainer>
- %(maintainer)s
- </maintainer>
- <title>
- %(title)s
- </title>
- <date>
- %(date)s
- </date>
- <version identifier="%(version)s" date="%(date)s" />
- <subject category="%(category)s"/>
- <description>
- %(description)s
- </description>
- <type>
- %(type)s
- </type>
- <format mime="%(mimeformat)s" />
- <identifier url="%(location)s"/>
- <language code="%(language)s"/>
- <rights type="%(license)s" />
- </resource>
+ <resource>
+ <creator>
+ %(creator)s
+ </creator>
+ <maintainer>
+ %(maintainer)s
+ </maintainer>
+ <title>
+ %(title)s
+ </title>
+ <date>
+ %(date)s
+ </date>
+ <version identifier="%(version)s" date="%(date)s" />
+ <subject category="%(category)s"/>
+ <description>
+ %(description)s
+ </description>
+ <type>
+ %(type)s
+ </type>
+ <format mime="%(mimeformat)s" />
+ <identifier url="%(location)s"/>
+ <language code="%(language)s"/>
+ <rights type="%(license)s" />
+ </resource>
</omf>
''' % omf_vars
sane_putenv ("INSTALLER_ROOT", prefix, true);
+ read_relocation_dir (prefix + "/etc/relocate/");
+
+#ifdef OLD_RELOCATION
string bindir = prefix + "/bin";
string datadir = prefix + "/share";
string libdir = prefix + "/lib";
string sysconfdir = prefix + "/etc";
-
+
/* need otherwise dynamic .so's aren't found. */
prepend_env_path ("DYLD_LIBRARY_PATH", libdir);
prepend_env_path ("GS_FONTPATH", datadir + "/gs/fonts");
prepend_env_path ("GS_LIB", datadir + "/gs/Resource");
prepend_env_path ("GS_LIB", datadir + "/gs/lib");
-
+
prepend_env_path ("GUILE_LOAD_PATH", datadir
+ to_string ("/guile/%d.%d",
SCM_MAJOR_VERSION, SCM_MINOR_VERSION));
-
set_env_file ("PANGO_RC_FILE", sysconfdir + "/pango/pangorc");
set_env_dir ("PANGO_PREFIX", prefix);
+
+#endif
+
prepend_env_path ("PATH", bindir);
}
mail_address = '(address unknown)'
try:
- mail_address= os.environ['MAILADDRESS']
+ mail_address= os.environ['MAILADDRESS']
except KeyError:
- pass
+ pass
mail_address_url= 'mailto:' + mail_address
if re.search ("http://", mail_address):
- mail_address_url = mail_address
-
+ mail_address_url = mail_address
+
webmaster= mail_address
try:
- webmaster= os.environ['WEBMASTER']
+ webmaster= os.environ['WEBMASTER']
except KeyError:
- pass
+ pass
header_file = ''
footer_file = ''
def gulp_file (f):
- try:
- i = open(f)
- i.seek (0, 2)
- n = i.tell ()
- i.seek (0,0)
- except:
- sys.stderr.write ("can't open file: %s\n" % f)
- return ''
- s = i.read (n)
- if len (s) <= 0:
- sys.stderr.write ("gulped empty file: %s\n" % f)
- i.close ()
- return s
+ try:
+ i = open(f)
+ i.seek (0, 2)
+ n = i.tell ()
+ i.seek (0,0)
+ except:
+ sys.stderr.write ("can't open file: %s\n" % f)
+ return ''
+ s = i.read (n)
+ if len (s) <= 0:
+ sys.stderr.write ("gulped empty file: %s\n" % f)
+ i.close ()
+ return s
def help ():
- sys.stdout.write (r"""Usage: add-html-footer [OPTIONS]... HTML-FILE
+ sys.stdout.write (r"""Usage: add-html-footer [OPTIONS]... HTML-FILE
Add header, footer and top of ChangLog file (up to the ********) to HTML-FILE
Options:
- --changelog=FILE use FILE as ChangeLog [ChangeLog]
- --footer=FILE use FILE as footer
- --header=FILE use FILE as header
- -h, --help print this help
- --index=URL set homepage to URL
- --name=NAME set package_name to NAME
- --version=VERSION set package version to VERSION
+ --changelog=FILE use FILE as ChangeLog [ChangeLog]
+ --footer=FILE use FILE as footer
+ --header=FILE use FILE as header
+ -h, --help print this help
+ --index=URL set homepage to URL
+ --name=NAME set package_name to NAME
+ --version=VERSION set package version to VERSION
""")
- sys.exit (0)
+ sys.exit (0)
(options, files) = getopt.getopt(sys.argv[1:], 'h', [
- 'changelog=', 'footer=', 'header=', 'help', 'index=',
- 'name=', 'version='])
+ 'changelog=', 'footer=', 'header=', 'help', 'index=',
+ 'name=', 'version='])
for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--changelog':
- changelog_file = a
- elif o == '--footer':
- footer_file = a
- elif o == '--header':
- header_file = a
- elif o == '-h' or o == '--help':
- help ()
- # urg, this is top!
- elif o == '--index':
- index_url = a
- elif o == '--name':
- package_name = a
- elif o == '--version':
- package_version = a
- else:
- raise 'unknown opt ', o
+ o = opt[0]
+ a = opt[1]
+ if o == '--changelog':
+ changelog_file = a
+ elif o == '--footer':
+ footer_file = a
+ elif o == '--header':
+ header_file = a
+ elif o == '-h' or o == '--help':
+ help ()
+ # urg, this is top!
+ elif o == '--index':
+ index_url = a
+ elif o == '--name':
+ package_name = a
+ elif o == '--version':
+ package_version = a
+ else:
+ raise 'unknown opt ', o
#burp?
def set_gcos ():
- global gcos
- os.environ["CONFIGSUFFIX"] = 'www';
- if os.name == 'nt':
- import ntpwd
- pw = ntpwd.getpwname(os.environ['USERNAME'])
- else:
- import pwd
- if os.environ.has_key('FAKEROOTKEY') and os.environ.has_key('LOGNAME'):
- pw = pwd.getpwnam (os.environ['LOGNAME'])
- else:
- pw = pwd.getpwuid (os.getuid())
-
- f = pw[4]
- f = string.split (f, ',')[0]
- gcos = f
+ global gcos
+ os.environ["CONFIGSUFFIX"] = 'www';
+ if os.name == 'nt':
+ import ntpwd
+ pw = ntpwd.getpwname(os.environ['USERNAME'])
+ else:
+ import pwd
+ if os.environ.has_key('FAKEROOTKEY') and os.environ.has_key('LOGNAME'):
+ pw = pwd.getpwnam (os.environ['LOGNAME'])
+ else:
+ pw = pwd.getpwuid (os.getuid())
+
+ f = pw[4]
+ f = string.split (f, ',')[0]
+ gcos = f
def compose (default, file):
- s = default
- if file:
- s = gulp_file (file)
- return s
+ s = default
+ if file:
+ s = gulp_file (file)
+ return s
set_gcos ()
localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
if os.path.basename (index_url) != "index.html":
- index_url = os.path.join (index_url , "index.html")
+ index_url = os.path.join (index_url , "index.html")
top_url = os.path.dirname (index_url) + "/"
header = compose (default_header, header_file)
# On most platforms, this is equivalent to
#`normpath(join(os.getcwd()), PATH)'. *Added in Python version 1.5.2*
if os.path.__dict__.has_key ('abspath'):
- abspath = os.path.abspath
+ abspath = os.path.abspath
else:
- def abspath (path):
- return os.path.normpath (os.path.join (os.getcwd (), path))
-
-
-def remove_self_ref (s):
- self_url = abspath (os.getcwd () + '/' + f)
- #sys.stderr.write ('url0: %s\n' % self_url)
-
- # self_url = re.sub ('.*?' + string.lower (package_name) + '[^/]*/',
- # '', self_url)
- # URG - this only works when source tree is unpacked in `src/' dir
- # For some reason, .*? still eats away
- # /home/fred/usr/src/lilypond-1.5.14/Documentation/user/out-www/lilypond/
- # instead of just
- #
- # /home/fred/usr/src/lilypond-1.5.14/
- #
- # Tutorial.html
- self_url = re.sub ('.*?src/' + string.lower (package_name) + '[^/]*/',
- '', self_url)
-
- #sys.stderr.write ('url1: %s\n' % self_url)
-
- #urg, ugly lily-specific toplevel index hack
- self_url = re.sub ('.*topdocs/out-www/index.html', 'index.html', self_url)
- #sys.stderr.write ('url2: %s\n' % self_url)
-
- # ugh, python2.[12] re is broken.
- ## pat = re.compile ('.*?(<a href="[\./]*' + self_url + '#?[^"]*">)([^<]*)(</a>)', re.DOTALL)
- pat = re.compile ('[.\n]*?(<a href="[\./]*' + self_url + '#?[^"]*">)([^<]*)(</a>)')
- m = pat.search (s)
- while m:
- #sys.stderr.write ('self: %s\n' % m.group (2))
- s = s[:m.start (1)] + m.group (2) + s[m.end (3):]
- m = pat.search (s)
- return s
+ def abspath (path):
+ return os.path.normpath (os.path.join (os.getcwd (), path))
+
+
+def remove_self_ref (s):
+ self_url = abspath (os.getcwd () + '/' + f)
+ #sys.stderr.write ('url0: %s\n' % self_url)
+
+ # self_url = re.sub ('.*?' + string.lower (package_name) + '[^/]*/',
+ # '', self_url)
+ # URG - this only works when source tree is unpacked in `src/' dir
+ # For some reason, .*? still eats away
+ # /home/fred/usr/src/lilypond-1.5.14/Documentation/user/out-www/lilypond/
+ # instead of just
+ #
+ # /home/fred/usr/src/lilypond-1.5.14/
+ #
+ # Tutorial.html
+ self_url = re.sub ('.*?src/' + string.lower (package_name) + '[^/]*/',
+ '', self_url)
+
+ #sys.stderr.write ('url1: %s\n' % self_url)
+
+ #urg, ugly lily-specific toplevel index hack
+ self_url = re.sub ('.*topdocs/out-www/index.html', 'index.html', self_url)
+ #sys.stderr.write ('url2: %s\n' % self_url)
+
+ # ugh, python2.[12] re is broken.
+ ## pat = re.compile ('.*?(<a href="[\./]*' + self_url + '#?[^"]*">)([^<]*)(</a>)', re.DOTALL)
+ pat = re.compile ('[.\n]*?(<a href="[\./]*' + self_url + '#?[^"]*">)([^<]*)(</a>)')
+ m = pat.search (s)
+ while m:
+ #sys.stderr.write ('self: %s\n' % m.group (2))
+ s = s[:m.start (1)] + m.group (2) + s[m.end (3):]
+ m = pat.search (s)
+ return s
def do_file (f):
- s = gulp_file (f)
- s = re.sub ('%', '%%', s)
-
-
- if re.search (header_tag, s) == None:
- body = '<BODY BGCOLOR=WHITE TEXT=BLACK>'
- s = re.sub ('(?i)<body>', body, s)
- if re.search ('(?i)<BODY', s):
- s = re.sub ('(?i)<body[^>]*>', body + header, s, 1)
- elif re.search ('(?i)<html', s):
- s = re.sub ('(?i)<html>', '<HTML>' + header, s, 1)
- else:
- s = header + s
-
- s = header_tag + '\n' + s
-
- if re.search ('(?i)<!DOCTYPE', s) == None:
- doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
- s = doctype + s
-
- if re.search (footer_tag, s) == None:
- if re.search ('(?i)</body', s):
- s = re.sub ('(?i)</body>', footer_tag + footer + '\n' + '</BODY>', s, 1)
- elif re.search ('(?i)</html', s):
- s = re.sub ('(?i)</html>', footer_tag + footer + '\n' + '</HTML>', s, 1)
- else:
- s = s + footer_tag + footer + '\n'
-
- s = i18n (f, s)
-
- #URUGRGOUSNGUOUNRIU
- index = index_url
- top = top_url
- if os.path.basename (f) == "index.html":
- cwd = os.getcwd ()
- if os.path.basename (cwd) == "topdocs":
- index = "index.html"
- top = ""
-
- # don't cause ///////index.html entries in log files.
- # index = "./index.html"
- # top = "./"
-
- versiontup = string.split(package_version, '.')
- branch_str = 'stable-branch'
- if string.atoi ( versiontup[1]) % 2:
- branch_str = 'development-branch'
-
- wiki_page = ('v%s.%s-' % (versiontup[0], versiontup[1]) + f)
- wiki_page = re.sub ('out-www/', '', wiki_page)
- wiki_page = re.sub ('/', '-', wiki_page)
- wiki_page = re.sub (r'\.-', '', wiki_page)
- wiki_page = re.sub ('.html', '', wiki_page)
-
- wiki_string = ''
-
- if wiki_base:
- wiki_string = (r'''<a href="%(wiki_base)s%(wiki_page)s">Read </a> comments on this page, or
- <a href="%(wiki_base)s%(wiki_page)s?action=edit">add</a> one.''' %
- { 'wiki_base': wiki_base,
- 'wiki_page': wiki_page})
-
- subst = globals ()
- subst.update (locals())
- s = s % subst
-
- # urg
- # maybe find first node?
- fallback_web_title = '-- --'
-
- # ugh, python2.[12] re is broken.
- #m = re.match ('.*?<title>\(.*?\)</title>', s, re.DOTALL)
- m = re.match ('[.\n]*?<title>([.\n]*?)</title>', s)
- if m:
- fallback_web_title = m.group (1)
- s = re.sub ('@WEB-TITLE@', fallback_web_title, s)
-
- s = remove_self_ref (s)
-
- # remove info's annoying's indication of referencing external document
- s = re.sub (' \((lilypond|lilypond-internals|music-glossary)\)</a>',
- '</a>', s)
-
- open (f, 'w').write (s)
+ s = gulp_file (f)
+ s = re.sub ('%', '%%', s)
+
+
+ if re.search (header_tag, s) == None:
+ body = '<BODY BGCOLOR=WHITE TEXT=BLACK>'
+ s = re.sub ('(?i)<body>', body, s)
+ if re.search ('(?i)<BODY', s):
+ s = re.sub ('(?i)<body[^>]*>', body + header, s, 1)
+ elif re.search ('(?i)<html', s):
+ s = re.sub ('(?i)<html>', '<HTML>' + header, s, 1)
+ else:
+ s = header + s
+
+ s = header_tag + '\n' + s
+
+ if re.search ('(?i)<!DOCTYPE', s) == None:
+ doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
+ s = doctype + s
+
+ if re.search (footer_tag, s) == None:
+ if re.search ('(?i)</body', s):
+ s = re.sub ('(?i)</body>', footer_tag + footer + '\n' + '</BODY>', s, 1)
+ elif re.search ('(?i)</html', s):
+ s = re.sub ('(?i)</html>', footer_tag + footer + '\n' + '</HTML>', s, 1)
+ else:
+ s = s + footer_tag + footer + '\n'
+
+ s = i18n (f, s)
+
+ #URUGRGOUSNGUOUNRIU
+ index = index_url
+ top = top_url
+ if os.path.basename (f) == "index.html":
+ cwd = os.getcwd ()
+ if os.path.basename (cwd) == "topdocs":
+ index = "index.html"
+ top = ""
+
+ # don't cause ///////index.html entries in log files.
+ # index = "./index.html"
+ # top = "./"
+
+ versiontup = string.split(package_version, '.')
+ branch_str = 'stable-branch'
+ if string.atoi ( versiontup[1]) % 2:
+ branch_str = 'development-branch'
+
+ wiki_page = ('v%s.%s-' % (versiontup[0], versiontup[1]) + f)
+ wiki_page = re.sub ('out-www/', '', wiki_page)
+ wiki_page = re.sub ('/', '-', wiki_page)
+ wiki_page = re.sub (r'\.-', '', wiki_page)
+ wiki_page = re.sub ('.html', '', wiki_page)
+
+ wiki_string = ''
+
+ if wiki_base:
+ wiki_string = (r'''<a href="%(wiki_base)s%(wiki_page)s">Read </a> comments on this page, or
+ <a href="%(wiki_base)s%(wiki_page)s?action=edit">add</a> one.''' %
+ { 'wiki_base': wiki_base,
+ 'wiki_page': wiki_page})
+
+ subst = globals ()
+ subst.update (locals())
+ s = s % subst
+
+ # urg
+ # maybe find first node?
+ fallback_web_title = '-- --'
+
+ # ugh, python2.[12] re is broken.
+ #m = re.match ('.*?<title>\(.*?\)</title>', s, re.DOTALL)
+ m = re.match ('[.\n]*?<title>([.\n]*?)</title>', s)
+ if m:
+ fallback_web_title = m.group (1)
+ s = re.sub ('@WEB-TITLE@', fallback_web_title, s)
+
+ s = remove_self_ref (s)
+
+ # remove info's annoying's indication of referencing external document
+ s = re.sub (' \((lilypond|lilypond-internals|music-glossary)\)</a>',
+ '</a>', s)
+
+ open (f, 'w').write (s)
localedir = 'out/locale'
try:
- import gettext
- gettext.bindtextdomain ('newweb', localedir)
- gettext.textdomain ('newweb')
- _ = gettext.gettext
+ import gettext
+ gettext.bindtextdomain ('newweb', localedir)
+ gettext.textdomain ('newweb')
+ _ = gettext.gettext
except:
- def _ (s):
- return s
+ def _ (s):
+ return s
underscore = _
LANGUAGES = (
- ('site', 'English'),
- ('nl', 'Nederlands'),
- )
+ ('site', 'English'),
+ ('nl', 'Nederlands'),
+ )
language_available = _ ("Other languages: %s.") % "%(language_menu)s"
browser_language = _ ("Using <A HREF='%s'>automatic language selection</A>.") \
- % "%(root_url)sabout/browser-language"
+ % "%(root_url)sabout/browser-language"
LANGUAGES_TEMPLATE = '''\
<P>
- %(language_available)s
- <BR>
- %(browser_language)s
+ %(language_available)s
+ <BR>
+ %(browser_language)s
</P>
''' % vars ()
def file_lang (file, lang):
- (base, ext) = os.path.splitext (file)
- base = os.path.splitext (base)[0]
- if lang and lang != 'site':
- return base + '.' + lang + ext
- return base + ext
+ (base, ext) = os.path.splitext (file)
+ base = os.path.splitext (base)[0]
+ if lang and lang != 'site':
+ return base + '.' + lang + ext
+ return base + ext
def i18n (file_name, page):
- # ugh
- root_url = "/web/"
-
- base_name = os.path.basename (file_name)
-
- lang = 'site'
- m = re.match ('.*[.]([^.]*).html', file_name)
- if m:
- lang = m.group (1)
-
- # Find available translations of this page.
- available = filter (lambda x: lang != x[0] \
- and os.path.exists (file_lang (file_name, x[0])),
- LANGUAGES)
-
- # Strip .html, .png suffix for auto language selection.
-# page = re.sub ('''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html(#[^"]*)|.png)[\'"]''',
-# '\\1="\\2"', page)
-
- # Create language menu.
- language_menu = ''
- for (prefix, name) in available:
- lang_file = file_lang (base_name, prefix)
- language_menu += '<a href="%(lang_file)s">%(name)s</a>' % vars ()
-
- languages = ''
- if language_menu:
- languages = LANGUAGES_TEMPLATE % vars ()
-
- # Put language menu before '</body>' and '</html>' tags
- if re.search ('(?i)</body', page):
- page = re.sub ('(?i)</body>', languages + '</BODY>', page, 1)
- elif re.search ('(?i)</html', page):
- page = re.sub ('(?i)</html>', languages + '</HTML>', page, 1)
- else:
- page = page + languages
-
- return page
- ## end i18n
+ # ugh
+ root_url = "/web/"
+
+ base_name = os.path.basename (file_name)
+
+ lang = 'site'
+ m = re.match ('.*[.]([^.]*).html', file_name)
+ if m:
+ lang = m.group (1)
+
+ # Find available translations of this page.
+ available = filter (lambda x: lang != x[0] \
+ and os.path.exists (file_lang (file_name, x[0])),
+ LANGUAGES)
+
+ # Strip .html, .png suffix for auto language selection.
+# page = re.sub ('''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html(#[^"]*)|.png)[\'"]''',
+# '\\1="\\2"', page)
+
+ # Create language menu.
+ language_menu = ''
+ for (prefix, name) in available:
+ lang_file = file_lang (base_name, prefix)
+ language_menu += '<a href="%(lang_file)s">%(name)s</a>' % vars ()
+
+ languages = ''
+ if language_menu:
+ languages = LANGUAGES_TEMPLATE % vars ()
+
+ # Put language menu before '</body>' and '</html>' tags
+ if re.search ('(?i)</body', page):
+ page = re.sub ('(?i)</body>', languages + '</BODY>', page, 1)
+ elif re.search ('(?i)</html', page):
+ page = re.sub ('(?i)</html>', languages + '</HTML>', page, 1)
+ else:
+ page = page + languages
+
+ return page
+ ## end i18n
for f in files:
- do_file (f)
+ do_file (f)
create_dir = False
for (o,a) in opts:
- if o == '-b':
- transform_base = a
- elif o == '-c':
- copy = True
- elif o == '-d':
- create_dir = True
- elif o == '-g':
- group = a
- elif o == '-m':
- mode = string.atoi (a, 8)
- elif o == '-o':
- owner = a
- elif o == '-s':
- strip = True
- elif o == '-t':
- transform = a
- elif o == '-h':
- print ''' Usage: $0 [OPTION]... SRCFILE DSTFILE
- or: $0 [OPTION]... SRCFILES... DIRECTORY
- or: $0 -d DIRECTORIES...
+ if o == '-b':
+ transform_base = a
+ elif o == '-c':
+ copy = True
+ elif o == '-d':
+ create_dir = True
+ elif o == '-g':
+ group = a
+ elif o == '-m':
+ mode = string.atoi (a, 8)
+ elif o == '-o':
+ owner = a
+ elif o == '-s':
+ strip = True
+ elif o == '-t':
+ transform = a
+ elif o == '-h':
+ print ''' Usage: $0 [OPTION]... SRCFILE DSTFILE
+ or: $0 [OPTION]... SRCFILES... DIRECTORY
+ or: $0 -d DIRECTORIES...
In the first form, install SRCFILE to DSTFILE, removing SRCFILE by default.
In the second, create the directory path DIR.
-t=TRANSFORM
--help display this help and exit.
--version display version info and exit.'''
- sys.exit (0)
+ sys.exit (0)
if not mode:
- if create_dir:
- mode = 0755
- else:
- mode = 0644
-
+ if create_dir:
+ mode = 0755
+ else:
+ mode = 0644
+
chown_me = []
dest = None
if not create_dir:
- dest = args.pop()
+ dest = args.pop()
for f in args:
- if create_dir:
- if os.path.isdir (f):
- continue
-
- os.makedirs (f, mode=mode)
- chown_me.append (f)
- else:
- if copy:
- if os.path.exists (dest) and not os.path.isdir (dest):
- os.remove (dest)
- shutil.copy2 (f, dest)
- else:
- shutil.move (f, dest)
+ if create_dir:
+ if os.path.isdir (f):
+ continue
+
+ os.makedirs (f, mode=mode)
+ chown_me.append (f)
+ else:
+ if copy:
+ if os.path.exists (dest) and not os.path.isdir (dest):
+ os.remove (dest)
+ shutil.copy2 (f, dest)
+ else:
+ shutil.move (f, dest)
- if os.path.isdir (dest):
- chown_me.append (os.path.join (dest, os.path.basename (f)))
- else:
- chown_me.append (dest)
+ if os.path.isdir (dest):
+ chown_me.append (os.path.join (dest, os.path.basename (f)))
+ else:
+ chown_me.append (dest)
for f in chown_me:
- os.chmod (f, mode)
- if group <> None or owner <> None:
- os.chown (f, group, owner)
-
-
+ os.chmod (f, mode)
+ if group <> None or owner <> None:
+ os.chown (f, group, owner)
+
+
-
+
import re
format_names = {'ps.gz': 'Compressed PostScript',
- 'html' : 'HTML'
- }
+ 'html' : 'HTML'
+ }
def gulp_file(f):
- try:
- i = open(f)
- i.seek (0, 2)
- n = i.tell ()
- i.seek (0,0)
- except:
- sys.stderr.write ("can't open file: %s\n" % f)
- return ''
- s = i.read (n)
- if len (s) <= 0:
- sys.stderr.write ("gulped empty file: %s\n" % f)
- i.close ()
- return s
+ try:
+ i = open(f)
+ i.seek (0, 2)
+ n = i.tell ()
+ i.seek (0,0)
+ except:
+ sys.stderr.write ("can't open file: %s\n" % f)
+ return ''
+ s = i.read (n)
+ if len (s) <= 0:
+ sys.stderr.write ("gulped empty file: %s\n" % f)
+ i.close ()
+ return s
class Latex_head:
- def __init__ (self):
- self.author = ''
- self.title = ''
- self.date = ''
- self.format = ''
-
+ def __init__ (self):
+ self.author = ''
+ self.title = ''
+ self.date = ''
+ self.format = ''
+
def read_latex_header (s):
- header = Latex_head()
- m = re.search(r'\\author{([^}]+)}', s)
- if m:
- header.author = m.group (1)
+ header = Latex_head()
+ m = re.search(r'\\author{([^}]+)}', s)
+ if m:
+ header.author = m.group (1)
- m = re.search (r'\\title{([^}]+)}',s )
- if m:
- header.title = m.group (1)
+ m = re.search (r'\\title{([^}]+)}',s )
+ if m:
+ header.title = m.group (1)
- header.formats = ['ps.gz']
- return header
+ header.formats = ['ps.gz']
+ return header
def read_bib_header (s):
- m = re.search ('% *AUTHOR *= *(.*)\n',s)
+ m = re.search ('% *AUTHOR *= *(.*)\n',s)
- header = Latex_head()
+ header = Latex_head()
- if m:
- header.author = m.group (1)
+ if m:
+ header.author = m.group (1)
- m = re.search ('% *TITLE *= *(.*)\n',s )
- if m:
- header.title = m.group (1)
+ m = re.search ('% *TITLE *= *(.*)\n',s )
+ if m:
+ header.title = m.group (1)
- header.formats = ['html']
- return header
+ header.formats = ['html']
+ return header
def read_pod_header (s):
- header = Latex_head ()
+ header = Latex_head ()
- i = re.search( '[^\n \t]', s)
- s = s[i:]
- i = re.search( '\n\n', s)
- s = s[i+2:]
- i = re.search( '\n\n', s)
- header.title = s[:i]
+    i = re.search ('[^\n \t]', s).start ()
+    s = s[i:]
+    i = re.search ('\n\n', s).start ()
+    s = s[i+2:]
+    i = re.search ('\n\n', s).start ()
+    header.title = s[:i]
- header.formats = ['html']
- return header
+ header.formats = ['html']
+ return header
def read_texinfo_header (s):
- header = Latex_head ()
-
- m = re.search( '@settitle (.*)\n', s)
- if m:
- header.title = m.group (1)
- m = re.search( '@author (.*)\n', s)
- if m:
- header.author = m.group (1)
-
- header.formats = ['html', 'ps.gz']
- return header
+ header = Latex_head ()
+
+ m = re.search( '@settitle (.*)\n', s)
+ if m:
+ header.title = m.group (1)
+ m = re.search( '@author (.*)\n', s)
+ if m:
+ header.author = m.group (1)
+
+ header.formats = ['html', 'ps.gz']
+ return header
# urg
# should make a 'next_parens '
yo_chapter_re = re.compile ('chapter(\\([^)]*\\))')
def read_yodl_header (s):
- header = Latex_head ()
- report = yo_report_re.search (s)
- article = 0
- sect = 0
- chapter = 0
- if report:
- header.author = report.group (2)
- header.title = yo_report_re.group (1)
- else:
- article = yo_article_re.search (s)
- if article:
- header.author = article.group (2)
- header.title = article.group (1)
- else:
- chapter = yo_chapter_re.search (s)
- if chapter:
- header.title = chapter.group (1)
- else:
- sect = yo_sect_re.search (s)
- if sect:
- header.title = sect.group (1)
-
- header.formats = ['html']
- return header
+ header = Latex_head ()
+ report = yo_report_re.search (s)
+ article = 0
+ sect = 0
+ chapter = 0
+ if report:
+ header.author = report.group (2)
+        header.title = report.group (1)
+ else:
+ article = yo_article_re.search (s)
+ if article:
+ header.author = article.group (2)
+ header.title = article.group (1)
+ else:
+ chapter = yo_chapter_re.search (s)
+ if chapter:
+ header.title = chapter.group (1)
+ else:
+ sect = yo_sect_re.search (s)
+ if sect:
+ header.title = sect.group (1)
+
+ header.formats = ['html']
+ return header
def print_html_head (l,o,h):
- pre =o
-
- fn = pre + h.basename
+ pre =o
+
+ fn = pre + h.basename
- t = h.filename
- if h.title :
- t = t + ': '+ h.title
+ t = h.filename
+ if h.title :
+ t = t + ': '+ h.title
- l.write ('<li>%s </a>' % t)
+ l.write ('<li>%s </a>' % t)
- if h.author:
- l.write ('<p>by %s</p>' % h.author)
+ if h.author:
+ l.write ('<p>by %s</p>' % h.author)
- for f in h.formats:
- l.write ('(<a href=%s.%s>%s</a>)' % (fn, f, format_names [f]))
- l.write ('</li>\n')
+ for f in h.formats:
+ l.write ('(<a href=%s.%s>%s</a>)' % (fn, f, format_names [f]))
+ l.write ('</li>\n')
def help ():
- sys.stdout.write (r"""Usage: ls-latex [OPTIONS]... FILE...
+ sys.stdout.write (r"""Usage: ls-latex [OPTIONS]... FILE...
Generate html index file for FILE...
Options:
-h, --help print this help
""")
- sys.exit (0)
+ sys.exit (0)
import getopt
(options, files) = getopt.getopt(sys.argv[1:],
- 'e:h', ['help', 'prefix=', 'title='])
+ 'e:h', ['help', 'prefix=', 'title='])
tex = ''
output =''
pre = ''
title = ''
for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--prefix':
- pre = a
- elif o == '--title':
- title = a
- elif o == '-h' or o == '--help':
- help ()
+ o = opt[0]
+ a = opt[1]
+ if o == '--prefix':
+ pre = a
+ elif o == '--title':
+ title = a
+ elif o == '-h' or o == '--help':
+ help ()
l = sys.stdout
read_header_funcs = {
- 'pod' : read_pod_header,
- 'tex' : read_latex_header,
- 'doc' : read_latex_header,
- 'bib': read_bib_header,
- 'latex' : read_latex_header,
- 'tely' : read_texinfo_header,
- 'texi': read_texinfo_header,
- 'yo': read_yodl_header,
-}
+ 'pod' : read_pod_header,
+ 'tex' : read_latex_header,
+ 'doc' : read_latex_header,
+ 'bib': read_bib_header,
+ 'latex' : read_latex_header,
+ 'tely' : read_texinfo_header,
+ 'texi': read_texinfo_header,
+ 'yo': read_yodl_header,
+}
for x in files:
- m = re.search ('\\.([^.]*)$', x)
- if m == None:
- continue
+ m = re.search ('\\.([^.]*)$', x)
+ if m == None:
+ continue
- s = gulp_file (x)
- head = read_header_funcs [m.group(1)] (s)
+ s = gulp_file (x)
+ head = read_header_funcs [m.group(1)] (s)
- head.filename = x
- head.basename = re.sub ("\\.[^.]+", '', x)
-
- print_html_head (l, pre, head)
+ head.filename = x
+ head.basename = re.sub ("\\.[^.]+", '', x)
+
+ print_html_head (l, pre, head)
l.write ('</ul></body></html>')
VERSION = sys.argv[1]
defs = []
for i in open (VERSION).readlines ():
- i = re.sub ('#.*','', i)
- m = re.search ('([^ =]*)[\t ]*=[ \t]*([^ \t]*)[ \t]*\n', i)
- if m:
- defs.append ((m.group (1), m.group (2)))
+ i = re.sub ('#.*','', i)
+ m = re.search ('([^ =]*)[\t ]*=[ \t]*([^ \t]*)[ \t]*\n', i)
+ if m:
+ defs.append ((m.group (1), m.group (2)))
sys.stdout.write (r'''
/*
- Automatically generated from %(VERSION)s
- by %(PROGRAM)s.
+ Automatically generated from %(VERSION)s
+ by %(PROGRAM)s.
*/
#ifndef VERSION_HH
#define VERSION_HH
''' % vars ())
for name, expansion in defs:
- # GUILE leaks autoconf data into userspace.
- sys.stdout.write (r'''
+ # GUILE leaks autoconf data into userspace.
+ sys.stdout.write (r'''
#ifdef %(name)s
#undef %(name)s
#endif /* %(name)s */
#define %(name)s "%(expansion)s"
''' % vars ())
-
+
if ('MY_PATCH_LEVEL', '') in defs:
- sys.stdout.write (r'''
+ sys.stdout.write (r'''
#define NO_MY_PATCHLEVEL
#define TOPLEVEL_VERSION MAJOR_VERSION "." MINOR_VERSION "." PATCH_LEVEL
''')
else:
- sys.stdout.write (r'''
+ sys.stdout.write (r'''
#define TOPLEVEL_VERSION MAJOR_VERSION "." MINOR_VERSION "." PATCH_LEVEL "." MY_PATCH_LEVEL
''')
sys.stdout.write(r'''
#endif /* VERSION_HH */
''')
-
+
import string
def getpwname( name, pwfile='/etc/passwd' ):
- "Get password record that matches the specified name"
- try:
- _fd = open( pwfile, 'r' )
- except:
- sys.stderr.write("Error unable to locate" + pwfile + "\n")
- sys.stderr.write("Consult gnu-win32 command mkpasswd\n")
- sys.exit(1)
+ "Get password record that matches the specified name"
+ try:
+ _fd = open( pwfile, 'r' )
+ except:
+ sys.stderr.write("Error unable to locate" + pwfile + "\n")
+ sys.stderr.write("Consult gnu-win32 command mkpasswd\n")
+ sys.exit(1)
- _data = _fd.read()
- _fd.close()
-
- for _line in string.split(_data, '\n'):
- _record=string.split( _line, ':' );
- if _record[0] == name:
- return _record
- return ()
+ _data = _fd.read()
+ _fd.close()
+
+ for _line in string.split(_data, '\n'):
+ _record=string.split( _line, ':' );
+ if _record[0] == name:
+ return _record
+ return ()
def _test():
- pw = getpwname( 'jeff' )
- print pw[4]
+ pw = getpwname( 'jeff' )
+ print pw[4]
if __name__ == '__main__':
- _test()
+ _test()
package_diff_dir = '/tmp/package-diff.%s/' % os.getlogin ()
def system (cmd):
- print cmd
- s = os.system (cmd)
- if s:
- raise 'barf'
+ print cmd
+ s = os.system (cmd)
+ if s:
+ raise 'barf'
-
+
def find(pattern, dir = os.curdir):
- list = []
- names = os.listdir(dir)
- names.sort()
- for name in names:
- if name in (os.curdir, os.pardir):
- continue
- fullname = os.path.join(dir, name)
- if fnmatch.fnmatch(name, pattern):
- list.append(fullname)
- if os.path.isdir(fullname) and not os.path.islink(fullname):
- for p in _prune:
- if fnmatch.fnmatch(name, p):
- if _debug: print "skip", `fullname`
- break
- else:
- if _debug: print "descend into", `fullname`
- list = list + find(pattern, fullname)
- return list
+ list = []
+ names = os.listdir(dir)
+ names.sort()
+ for name in names:
+ if name in (os.curdir, os.pardir):
+ continue
+ fullname = os.path.join(dir, name)
+ if fnmatch.fnmatch(name, pattern):
+ list.append(fullname)
+ if os.path.isdir(fullname) and not os.path.islink(fullname):
+ for p in _prune:
+ if fnmatch.fnmatch(name, p):
+ if _debug: print "skip", `fullname`
+ break
+ else:
+ if _debug: print "descend into", `fullname`
+ list = list + find(pattern, fullname)
+ return list
topdir = os.getcwd ()
def gulp_file(f):
- try:
- i = open(f)
- i.seek (0, 2)
- n = i.tell ()
- i.seek (0,0)
- except:
- sys.stderr.write ("can't open file: %s\n" % f)
- return ''
- s = i.read (n)
- if len (s) <= 0:
- sys.stderr.write ("gulped emty file: %s\n" % f)
- i.close ()
- return s
+ try:
+ i = open(f)
+ i.seek (0, 2)
+ n = i.tell ()
+ i.seek (0,0)
+ except:
+ sys.stderr.write ("can't open file: %s\n" % f)
+ return ''
+ s = i.read (n)
+ if len (s) <= 0:
+        sys.stderr.write ("gulped empty file: %s\n" % f)
+ i.close ()
+ return s
def mailaddress():
- try:
- return os.environ['MAILADDRESS']
- except KeyError:
- return '(address unknown)'
+ try:
+ return os.environ['MAILADDRESS']
+ except KeyError:
+ return '(address unknown)'
class Flags:
- def __init__ (self):
- self.to_version = 0
- self.from_version = 0
- self.package = 0
+ def __init__ (self):
+ self.to_version = 0
+ self.from_version = 0
+ self.package = 0
flags = Flags ()
def help ():
- sys.stdout.write (
- 'Generate a patch to go to current version\n'
- ' -f, --from=FROM old is FROM\n'
- ' -h, --help print this help\n'
- ' --outdir=DIR generate in DIR\n'
- ' -o, --output=NAME write patch to NAME\n'
- ' -p, --package=DIR specify package\n'
- ' -r, --release diff against latest release\n'
- ' -t, --to=TO to version TO\n'
- ' -F, --dir-from=FROM diff from directory FROM\n'
- ' -T, --dir-to=TO diff to directory TO\n'
- )
+ sys.stdout.write (
+ 'Generate a patch to go to current version\n'
+ ' -f, --from=FROM old is FROM\n'
+ ' -h, --help print this help\n'
+ ' --outdir=DIR generate in DIR\n'
+ ' -o, --output=NAME write patch to NAME\n'
+ ' -p, --package=DIR specify package\n'
+ ' -r, --release diff against latest release\n'
+ ' -t, --to=TO to version TO\n'
+ ' -F, --dir-from=FROM diff from directory FROM\n'
+ ' -T, --dir-to=TO diff to directory TO\n'
+ )
def cleanup ():
- global from_diff, to_diff, original_dir
- os.chdir (package_diff_dir)
- sys.stderr.write ('Cleaning ... ')
- system ('rm -fr %s %s' % (from_diff, to_diff))
- sys.stderr.write ('\n')
- os.chdir (original_dir)
+ global from_diff, to_diff, original_dir
+ os.chdir (package_diff_dir)
+ sys.stderr.write ('Cleaning ... ')
+ system ('rm -fr %s %s' % (from_diff, to_diff))
+ sys.stderr.write ('\n')
+ os.chdir (original_dir)
def untar (fn):
- # system ('pwd');
- try:
- open (fn)
- except:
- sys.stderr.write ("Can't find tarball: %s\n" % fn)
- cleanup ()
- sys.exit (1)
- sys.stderr.write ("Untarring: %s\n" % fn)
- system ('gzip --quiet -dc ' + fn + '| tar xf - ')
- sys.stderr.flush ()
+ # system ('pwd');
+ try:
+ open (fn)
+ except:
+ sys.stderr.write ("Can't find tarball: %s\n" % fn)
+ cleanup ()
+ sys.exit (1)
+ sys.stderr.write ("Untarring: %s\n" % fn)
+ system ('gzip --quiet -dc ' + fn + '| tar xf - ')
+ sys.stderr.flush ()
def remove_automatic (dirnames):
- files = []
-
- for d in dirnames:
- try:
- for p in pats:
- files = files + find (p, d)
- except:
- sys.stderr.write ("Can't find dir: %s\n" % d)
- cleanup ()
- sys.exit (1)
-
- dirs = map (lambda d: find ('out*', d), dirnames)
- dirs = reduce (lambda x,y: x + y, dirs)
-
- #print dirs
-
- for d in dirs:
- if os.path.isdir (d):
- files = files + find ('*', d)
-
- for f in files:
- try:
- os.remove (f)
- except:
- sys.stderr.write ("can't remove: `" + f + "'\n'")
+ files = []
+
+ for d in dirnames:
+ try:
+ for p in pats:
+ files = files + find (p, d)
+ except:
+ sys.stderr.write ("Can't find dir: %s\n" % d)
+ cleanup ()
+ sys.exit (1)
+
+ dirs = map (lambda d: find ('out*', d), dirnames)
+ dirs = reduce (lambda x,y: x + y, dirs)
+
+ #print dirs
+
+ for d in dirs:
+ if os.path.isdir (d):
+ files = files + find ('*', d)
+
+ for f in files:
+ try:
+ os.remove (f)
+ except:
+            sys.stderr.write ("can't remove: `" + f + "'\n")
def dirname (v):
- return flags.package.name + '-' + version_tuple_to_str (v)
+ return flags.package.name + '-' + version_tuple_to_str (v)
def tarball(v):
- return dirname (v) + '.tar.gz'
+ return dirname (v) + '.tar.gz'
def released_tarball(v):
- return flags.package.release_dir + tarball (v)
+ return flags.package.release_dir + tarball (v)
def remove_configure (dir):
- os.chdir (dir)
+ os.chdir (dir)
- # ugh
- system ('rm -f *.ly')
+ # ugh
+ system ('rm -f *.ly')
- # should do 'make distclean ?'
- system ('rm -rf debian/out GNUmakefile config.cache config.h config.hh config.log config.make config.status configure stepmake/GNUmakefile stepmake/config.hh stepmake/config.log stepmake/config.status stepmake/config.make')
+ # should do 'make distclean ?'
+ system ('rm -rf debian/out GNUmakefile config.cache config.h config.hh config.log config.make config.status configure stepmake/GNUmakefile stepmake/config.hh stepmake/config.log stepmake/config.status stepmake/config.make')
- # ugh: symlinks
- system ('rm -f stepmake/stepmake/stepmake stepmake/stepmake/bin')
+ # ugh: symlinks
+ system ('rm -f stepmake/stepmake/stepmake stepmake/stepmake/bin')
# ugh, how to get rid of .tex files generated by lily?
usage
- cd %s-source-dir; patch -E -p1 < %s
+ cd %s-source-dir; patch -E -p1 < %s
Patches do not contain automatically generated files
or (urg) empty directories,
i.e., you should run
- ./autogen.sh
- ./configure [your options.]
+ ./autogen.sh
+ ./configure [your options.]
-
+
"""
def makediff (fromdir, todir, patch_name):
- remove_automatic ([fromdir, todir])
-
- # ugh
- remove_configure (fromdir)
- remove_configure (todir)
- os.chdir (todir)
-
- fromname = fromdir
- toname = todir
- if os.path.dirname (fromname) == os.path.dirname (toname):
- fromname = os.path.basename (fromname)
- toname = os.path.basename (toname)
- fromdir = '../' + fromname
-
- f = open (patch_name, 'wb')
- f.write (header %
- (mailaddress (), fromname, toname,
- flags.package.name, os.path.basename (patch_name)))
-
- f.close ()
-
- sys.stderr.write ('diffing to %s... ' % patch_name)
- system ('pwd')
- print ('diff -purN %s . >> %s' % (fromdir, patch_name))
- system ('diff -purN %s . >> %s' % (fromdir, patch_name))
- system ('gzip --quiet -9f %s' % patch_name)
-
+ remove_automatic ([fromdir, todir])
+
+ # ugh
+ remove_configure (fromdir)
+ remove_configure (todir)
+ os.chdir (todir)
+
+ fromname = fromdir
+ toname = todir
+ if os.path.dirname (fromname) == os.path.dirname (toname):
+ fromname = os.path.basename (fromname)
+ toname = os.path.basename (toname)
+ fromdir = '../' + fromname
+
+ f = open (patch_name, 'wb')
+ f.write (header %
+ (mailaddress (), fromname, toname,
+ flags.package.name, os.path.basename (patch_name)))
+
+ f.close ()
+
+ sys.stderr.write ('diffing to %s... ' % patch_name)
+ system ('pwd')
+ print ('diff -purN %s . >> %s' % (fromdir, patch_name))
+ system ('diff -purN %s . >> %s' % (fromdir, patch_name))
+ system ('gzip --quiet -9f %s' % patch_name)
+
os.environ['GZIP'] = '-q'
print 'argv: ' + string.join (sys.argv[1:])
(options, files) = getopt.getopt (sys.argv[1:],
- 'hF:f:o:p:rT:t:', ['conf=', 'from=', 'dir-from=', 'dir-to=', 'help', 'outdir=', 'output=', 'package=', 'release', 'to='])
+ 'hF:f:o:p:rT:t:', ['conf=', 'from=', 'dir-from=', 'dir-to=', 'help', 'outdir=', 'output=', 'package=', 'release', 'to='])
patch_name = ''
conf = ''
from_version=0
to_version=0
for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--from' or o == '-f':
- from_version = a
- elif o == '--to' or o == '-t':
- to_version = a
- elif o == '--dir-from' or o == '-F':
- from_src = a;
- elif o == '--dir-to' or o == '-T':
- to_src = a;
- elif o == '--help' or o == '-h':
- help ()
- sys.exit (0)
- elif o == '--outdir':
- outdir = a
- elif o == '--conf':
- conf = a
- elif o == '--output' or o == '-o':
- patch_name = a
- elif o == '-p' or o == '--package':
- topdir = a
- elif o == '--release' or o == '-r':
- release=1
- else:
- raise getopt.error
+ o = opt[0]
+ a = opt[1]
+ if o == '--from' or o == '-f':
+ from_version = a
+ elif o == '--to' or o == '-t':
+ to_version = a
+ elif o == '--dir-from' or o == '-F':
+ from_src = a;
+ elif o == '--dir-to' or o == '-T':
+ to_src = a;
+ elif o == '--help' or o == '-h':
+ help ()
+ sys.exit (0)
+ elif o == '--outdir':
+ outdir = a
+ elif o == '--conf':
+ conf = a
+ elif o == '--output' or o == '-o':
+ patch_name = a
+ elif o == '-p' or o == '--package':
+ topdir = a
+ elif o == '--release' or o == '-r':
+ release=1
+ else:
+ raise getopt.error
sys.path.append (topdir + '/stepmake/bin')
from packagepython import *
packager = Packager ()
if from_src:
- from_package = Package (from_src)
- flags.from_version = from_package.version
+ from_package = Package (from_src)
+ flags.from_version = from_package.version
if from_version:
- flags.from_version = version_str_to_tuple (from_version)
- from_src = ''
+ flags.from_version = version_str_to_tuple (from_version)
+ from_src = ''
if to_src:
- to_package = Package (to_src)
- flags.to_version = to_package.version
+ to_package = Package (to_src)
+ flags.to_version = to_package.version
if to_version:
- flags.to_version = version_str_to_tuple (to_version)
- to_src = ''
+ flags.to_version = version_str_to_tuple (to_version)
+ to_src = ''
if not flags.to_version:
- flags.to_version = package.version
+ flags.to_version = package.version
if not flags.from_version:
- flags.from_version = prev_version (flags.to_version)
+ flags.from_version = prev_version (flags.to_version)
# urg
if release:
- flags.from_version = (flags.from_version[0],
- flags.from_version[1], flags.from_version[2], '');
+ flags.from_version = (flags.from_version[0],
+ flags.from_version[1], flags.from_version[2], '');
import tempfile
original_dir = os.getcwd ();
system ('rm -rf %s' % package_diff_dir)
try:
- os.mkdir (package_diff_dir)
+ os.mkdir (package_diff_dir)
except:
- pass
+ pass
from_diff = dirname (flags.from_version)
to_diff = dirname (flags.to_version)
if to_diff == from_diff:
- if from_src:
- from_diff = from_diff + '.src'
- elif to_src:
- to_diff = to_diff + '.src'
- else:
- sys.stderr.write (patch_name + ': nothing to do: to == from = ' + from_diff + '\n')
- sys.exit (1)
+ if from_src:
+ from_diff = from_diff + '.src'
+ elif to_src:
+ to_diff = to_diff + '.src'
+ else:
+ sys.stderr.write (patch_name + ': nothing to do: to == from = ' + from_diff + '\n')
+ sys.exit (1)
def compat_abspath (path):
- return os.path.normpath (os.path.join (os.getcwd (), path))
+ return os.path.normpath (os.path.join (os.getcwd (), path))
if conf and not outdir:
- outdir = 'out-' + conf
+ outdir = 'out-' + conf
if not patch_name:
- patch_name = os.path.join (outdir, '%s-%s-%s.diff' % (package.name,
- version_tuple_to_str (flags.from_version),
- version_tuple_to_str (flags.to_version)))
+ patch_name = os.path.join (outdir, '%s-%s-%s.diff' % (package.name,
+ version_tuple_to_str (flags.from_version),
+ version_tuple_to_str (flags.to_version)))
- patch_name = compat_abspath (patch_name)
+ patch_name = compat_abspath (patch_name)
from_diff = package_diff_dir + from_diff
to_diff = package_diff_dir + to_diff
if not from_src:
- os.chdir (package_diff_dir)
- untar (released_tarball (flags.from_version))
- os.chdir (original_dir)
+ os.chdir (package_diff_dir)
+ untar (released_tarball (flags.from_version))
+ os.chdir (original_dir)
else:
- sys.stderr.write ('copying ' + from_src + ' to ' + from_diff + '\n')
- # system ('cp -pr %s %s' % (srcdir, from_diff))
- system ('mkdir -p %s '% (from_diff))
- os.chdir (from_src)
- system ('tar cf - --exclude out --exclude out-www . \
- | tar -xf - -C %s' % from_diff)
+ sys.stderr.write ('copying ' + from_src + ' to ' + from_diff + '\n')
+ # system ('cp -pr %s %s' % (srcdir, from_diff))
+ system ('mkdir -p %s '% (from_diff))
+ os.chdir (from_src)
+ system ('tar cf - --exclude out --exclude out-www . \
+ | tar -xf - -C %s' % from_diff)
if not to_src:
- os.chdir (package_diff_dir)
- untar (released_tarball (flags.to_version))
- os.chdir (original_dir)
+ os.chdir (package_diff_dir)
+ untar (released_tarball (flags.to_version))
+ os.chdir (original_dir)
else:
- sys.stderr.write ('copying ' + to_src + ' to ' + to_diff + '\n')
- system ('mkdir -p %s '% (to_diff))
- # system ('cp -pr %s %s' (to_src, to_diff))%
- os.chdir (to_src)
- system ('tar -cf - --exclude out --exclude out-www . \
- . | tar -xf - -C %s ' % to_diff)
+ sys.stderr.write ('copying ' + to_src + ' to ' + to_diff + '\n')
+ system ('mkdir -p %s '% (to_diff))
+    # system ('cp -pr %s %s' % (to_src, to_diff))
+ os.chdir (to_src)
+ system ('tar -cf - --exclude out --exclude out-www . \
+    | tar -xf - -C %s ' % to_diff)
os.chdir (to_diff)
makediff (from_diff, to_diff, patch_name)
make_assign_re = re.compile ('^([A-Z_]*)=(.*)$')
def read_makefile (fn):
- file = open (fn)
- lines = file.readlines()
+ file = open (fn)
+ lines = file.readlines()
- mi = pa = mj = 0
- mp = ''
+ mi = pa = mj = 0
+ mp = ''
- make_dict = {}
- for l in lines:
- m = make_assign_re.search (l)
- if m:
- nm = m.group (1)
- val = m.group (2)
- make_dict[nm] = val
- return make_dict
+ make_dict = {}
+ for l in lines:
+ m = make_assign_re.search (l)
+ if m:
+ nm = m.group (1)
+ val = m.group (2)
+ make_dict[nm] = val
+ return make_dict
class Package:
- def __init__ (self, dirname):
- dict = read_makefile (dirname + '/VERSION')
- version_list = []
- for x in [ 'MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_LEVEL']:
- version_list.append (string.atoi (dict[x]))
- version_list.append (dict['MY_PATCH_LEVEL'])
- self.topdir = dirname
- self.groupdir = self.topdir + '/..'
- self.patch_dir = self.groupdir + '/patches/'
- self.release_dir = self.groupdir + '/releases/'
- self.test_dir = self.groupdir + '/test/'
- self.version = tuple(version_list)
- self.Name = dict['PACKAGE_NAME']
- self.name = string.lower (self.Name)
- if self.name == 'lilypond':
- self.nickname = 'lelie'
- else:
- self.nickname = self.name
- self.NAME = string.upper (self.Name)
+ def __init__ (self, dirname):
+ dict = read_makefile (dirname + '/VERSION')
+ version_list = []
+ for x in [ 'MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_LEVEL']:
+ version_list.append (string.atoi (dict[x]))
+ version_list.append (dict['MY_PATCH_LEVEL'])
+ self.topdir = dirname
+ self.groupdir = self.topdir + '/..'
+ self.patch_dir = self.groupdir + '/patches/'
+ self.release_dir = self.groupdir + '/releases/'
+ self.test_dir = self.groupdir + '/test/'
+ self.version = tuple(version_list)
+ self.Name = dict['PACKAGE_NAME']
+ self.name = string.lower (self.Name)
+ if self.name == 'lilypond':
+ self.nickname = 'lelie'
+ else:
+ self.nickname = self.name
+ self.NAME = string.upper (self.Name)
class Packager:
- def __init__ (self):
- try:
- m= os.environ['MAILADDRESS']
- except KeyError:
- m= '(address unknown)'
- self.mail= m
- try:
- m= os.environ['WEBMASTER']
- except KeyError:
- m= self.mail
- self.webmaster= m
+ def __init__ (self):
+ try:
+ m= os.environ['MAILADDRESS']
+ except KeyError:
+ m= '(address unknown)'
+ self.mail= m
+ try:
+ m= os.environ['WEBMASTER']
+ except KeyError:
+ m= self.mail
+ self.webmaster= m
def full_version_tup(tup):
- t = [0,0,0,'']
- for i in range (4):
- try:
- t[i] = tup[i]
- except IndexError:
- break
- return tuple(t)
+ t = [0,0,0,'']
+ for i in range (4):
+ try:
+ t[i] = tup[i]
+ except IndexError:
+ break
+ return tuple(t)
def split_my_patchlevel (str):
- m = re.match ('(.*?)([0-9]*)$', str)
- return (m.group (1), string.atoi (m.group (2)))
+ m = re.match ('(.*?)([0-9]*)$', str)
+ return (m.group (1), string.atoi (m.group (2)))
def next_version(tup):
- l = list(full_version_tup (tup))
- t3name=t3num=''
- if l[3]:
- (t3name,t3num)= split_my_patchlevel (l[3])
- if t3num:
- t3num = '%d' % (t3num + 1)
- else:
- t3num = t3name =''
- else:
- l[2] = l[2] +1
-
- return tuple(l[0:3] + [t3name + t3num])
+ l = list(full_version_tup (tup))
+ t3name=t3num=''
+ if l[3]:
+ (t3name,t3num)= split_my_patchlevel (l[3])
+ if t3num:
+ t3num = '%d' % (t3num + 1)
+ else:
+ t3num = t3name =''
+ else:
+ l[2] = l[2] +1
+
+ return tuple(l[0:3] + [t3name + t3num])
def prev_version(tup):
- l = list(full_version_tup (tup))
- t3name=t3num=''
- if l[3]:
- (t3name, t3num) = split_my_patchlevel (l[3])
- if t3num and t3num - 1 > 0:
- t3num = '%d' %(t3num - 1)
- else:
- t3num = t3name =''
-
- else:
- l[2] = l[2] -1
-
- return tuple(l[0:3] + [t3name + t3num])
+ l = list(full_version_tup (tup))
+ t3name=t3num=''
+ if l[3]:
+ (t3name, t3num) = split_my_patchlevel (l[3])
+ if t3num and t3num - 1 > 0:
+ t3num = '%d' %(t3num - 1)
+ else:
+ t3num = t3name =''
+
+ else:
+ l[2] = l[2] -1
+
+ return tuple(l[0:3] + [t3name + t3num])
def version_tuple_to_str(tup):
- tup = full_version_tup (tup)
- if tup[3]:
- my = '.' + tup[3]
- else:
- my = ''
- return ('%d.%d.%d' % tup[0:3]) + my
+ tup = full_version_tup (tup)
+ if tup[3]:
+ my = '.' + tup[3]
+ else:
+ my = ''
+ return ('%d.%d.%d' % tup[0:3]) + my
def version_str_to_tuple(str):
- t = string.split(str, '.')
- mypatch = ''
- if len (t) >= 4:
- mypatch = string.join (t[3:], '.')
- return (string.atoi(t[0]), string.atoi(t[1]), string.atoi(t[2]), mypatch)
+ t = string.split(str, '.')
+ mypatch = ''
+ if len (t) >= 4:
+ mypatch = string.join (t[3:], '.')
+ return (string.atoi(t[0]), string.atoi(t[1]), string.atoi(t[2]), mypatch)
def version_compare (ltup, rtup):
- rtup = full_version_tup (ltup)
- rtup = full_version_tup (rtup)
- for i in (0,1,2):
- if ltup[i] - rtup[i]: return ltup[i] - rtup[i]
- if ltup[3] and rtup[3]:
- (lname, lnum) = split_my_patchlevel (ltup[i])
- (rname, rnum) = split_my_patchlevel (rtup[3])
- if lname != rname:
- raise 'ambiguous'
- return sign (lnum - rnum)
- if ltup[3]:
- return 1
- else:
- return -1
-
+    ltup = full_version_tup (ltup)
+ rtup = full_version_tup (rtup)
+ for i in (0,1,2):
+ if ltup[i] - rtup[i]: return ltup[i] - rtup[i]
+ if ltup[3] and rtup[3]:
+        (lname, lnum) = split_my_patchlevel (ltup[3])
+ (rname, rnum) = split_my_patchlevel (rtup[3])
+ if lname != rname:
+ raise 'ambiguous'
+ return sign (lnum - rnum)
+ if ltup[3]:
+ return 1
+ else:
+ return -1
+
if __name__ == '__main__':
- p = Package ('.')
- v= p.version
- print v, prev_version(v), next_version(v)
- pv=(0,1,1,'jcn4')
- print version_tuple_to_str(pv), prev_version(pv), next_version(pv)
- print version_tuple_to_str((0,1,1,''))
- print full_version_tup ((0,1))
-
-
+ p = Package ('.')
+ v= p.version
+ print v, prev_version(v), next_version(v)
+ pv=(0,1,1,'jcn4')
+ print version_tuple_to_str(pv), prev_version(pv), next_version(pv)
+ print version_tuple_to_str((0,1,1,''))
+ print full_version_tup ((0,1))
+
+
def dump_file(f, s):
- i = open(f, 'w')
- i.write(s)
- i.close ()
+ i = open(f, 'w')
+ i.write(s)
+ i.close ()
import time
(options, files) = getopt.getopt (sys.argv[1:], 'ho:p:',
- ['help', 'outdir=', 'package='])
+ ['help', 'outdir=', 'package='])
def help ():
- sys.stdout.write (r"""Usage: release [OPTIONS]...
+ sys.stdout.write (r"""Usage: release [OPTIONS]...
Make a tarball and patch.
Options:
- -o, --outdir=DIR specify where to leave patches
- -h, --help print this help
- -p, --package=DIR specify package"""
+ -o, --outdir=DIR specify where to leave patches
+ -h, --help print this help
+ -p, --package=DIR specify package"""
)
- sys.exit (0)
+ sys.exit (0)
topdir = ''
outdir = '.'
for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '-h' or o == '--help':
- help ()
- elif o == '-p' or o == '--package':
- topdir = a
- elif o == '--outdir' or o == '-o':
- outdir = a
-
+ o = opt[0]
+ a = opt[1]
+ if o == '-h' or o == '--help':
+ help ()
+ elif o == '-p' or o == '--package':
+ topdir = a
+ elif o == '--outdir' or o == '-o':
+ outdir = a
+
sys.path.append (topdir + '/stepmake/bin')
package = packagepython.Package (topdir)
release_tarfile = os.path.join (package.release_dir, tarball)
if os.path.exists (out_tarfile):
- os.unlink (out_tarfile)
+ os.unlink (out_tarfile)
changelog_name = os.path.join (topdir, 'ChangeLog')
lines = open (changelog_name).readlines ()
release_marker = '\t* VERSION: %(release_version)s' % vars ()
if not package.version[3] \
- and lines[2][0:len (release_marker) - 1] != release_marker:
- sys.stderr.write ("warning: ChangeLog: adding VERSION: %s\n" \
- % release_version)
- user_changelog_entry = time.strftime ('%Y-%m-%d') \
- + ' ' + os.environ['EMAIL']
- changelog = open (changelog_name, 'w')
- changelog.write (user_changelog_entry)
- changelog.write ('\n\n')
- changelog.write (release_marker)
- changelog.write ('\n\n')
- changelog.writelines (lines)
- changelog.close ()
+ and lines[2][0:len (release_marker) - 1] != release_marker:
+ sys.stderr.write ("warning: ChangeLog: adding VERSION: %s\n" \
+ % release_version)
+ user_changelog_entry = time.strftime ('%Y-%m-%d') \
+ + ' ' + os.environ['EMAIL']
+ changelog = open (changelog_name, 'w')
+ changelog.write (user_changelog_entry)
+ changelog.write ('\n\n')
+ changelog.write (release_marker)
+ changelog.write ('\n\n')
+ changelog.writelines (lines)
+ changelog.close ()
status = os.system ('make dist')
if status:
- raise 'make dist failed'
+ raise 'make dist failed'
if os.path.exists (release_tarfile):
- os.unlink (release_tarfile)
-
+ os.unlink (release_tarfile)
+
os.link (out_tarfile, release_tarfile)
diff_py = package.topdir + '/stepmake/bin/package-diff.py'
diff_py_options = '--outdir=%(outdir)s --package=%(topdir)s' % vars ()
status = os.system (string.join ((sys.executable, diff_py, diff_py_options)))
if status:
- raise 'make diff failed'
+ raise 'make diff failed'
previous_tuple = packagepython.prev_version (package.version)
previous_version = packagepython.version_tuple_to_str (previous_tuple)
diff_base = string.join ((package.name, previous_version, release_version),
- '-')
+ '-')
diff_name = diff_base + '.diff.gz'
out_diff = os.path.join (outdir, diff_name)
release_diff = os.path.join (package.patch_dir, diff_name)
if not os.path.exists (out_diff):
- sys.stderr.write ("error: cannot open: %s\n" % out_diff)
- sys.exit (1)
+ sys.stderr.write ("error: cannot open: %s\n" % out_diff)
+ sys.exit (1)
if os.path.exists (release_diff):
- os.unlink (release_diff)
+ os.unlink (release_diff)
os.link (out_diff, release_diff)
entities = {
- "&" : 'amp',
- "`" : 'apos',
- '>' : 'gt',
- '<' : 'lt',
- '"' : 'quot',
- }
+ "&" : 'amp',
+ "`" : 'apos',
+ '>' : 'gt',
+ '<' : 'lt',
+ '"' : 'quot',
+ }
def txt2html (s):
- for i in entities.keys ():
- s = re.sub (i, '\001' + entities[i] + ';', s);
- s = re.sub ('\001', '&', s);
- return s
+ for i in entities.keys ():
+ s = re.sub (i, '\001' + entities[i] + ';', s);
+ s = re.sub ('\001', '&', s);
+ return s
for a in sys.argv[1:]:
- # hmm, we need: text2html out/foe.txt -> out/foe.html,
- # -o is a bit overkill?
- # outfile = os.path.basename (os.path.splitext(a)[0]) + '.html'
- outfile = os.path.splitext(a)[0] + '.html'
-
- try:
- os.unlink(outfile)
- except:
- pass
-
- s = r"""
+ # hmm, we need: text2html out/foe.txt -> out/foe.html,
+ # -o is a bit overkill?
+ # outfile = os.path.basename (os.path.splitext(a)[0]) + '.html'
+ outfile = os.path.splitext(a)[0] + '.html'
+
+ try:
+ os.unlink(outfile)
+ except:
+ pass
+
+ s = r"""
<html>
<head>
- <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">
+ <META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">
</head>
<body><pre>
%s
</pre></body></html>
""" % txt2html (open (a).read ())
- open (outfile, 'w').write (s)
+ open (outfile, 'w').write (s)