X-Git-Url: https://git.donarmstrong.com/?a=blobdiff_plain;f=scripts%2Fmudela-book.py;h=6539a48e3bd74dec062927150ba2d62a24c3e393;hb=ef0696c47e7b5e3f7f2b018bca3d9eaef62439b0;hp=d82d949dfe2580997f4dde889b21b7b6feec85f3;hpb=127a1c165d3c16f971413ba36de9bb65cf3d8aa4;p=lilypond.git diff --git a/scripts/mudela-book.py b/scripts/mudela-book.py index d82d949dfe..6539a48e3b 100644 --- a/scripts/mudela-book.py +++ b/scripts/mudela-book.py @@ -1,80 +1,304 @@ #!@PYTHON@ +# vim: set noexpandtab: +# TODO: +# * Figure out clean set of options. Hmm, isn't it pretty ok now? +# * add support for .lilyrc + + +# This is was the idea for handling of comments: +# Multiline comments, @ignore .. @end ignore is scanned for +# in read_doc_file, and the chunks are marked as 'ignore', so +# mudela-book will not touch them any more. The content of the +# chunks are written to the output file. Also 'include' and 'input' +# regex has to check if they are commented out. +# +# Then it is scanned for 'mudela', 'mudela-file' and 'mudela-block'. +# These three regex's has to check if they are on a commented line, +# % for latex, @c for texinfo. +# +# Then lines that are commented out with % (latex) and @c (Texinfo) +# are put into chunks marked 'ignore'. This cannot be done before +# searching for the mudela-blocks because % is also the comment character +# for lilypond. +# +# The the rest of the rexeces are searched for. They don't have to test +# if they are on a commented out line. import os +import stat import string import re import getopt import sys import __main__ +import operator -initfile = '' program_version = '@TOPLEVEL_VERSION@' +if program_version == '@' + 'TOPLEVEL_VERSION' + '@': + program_version = '1.3.105.tca1' -cwd = os.getcwd () -include_path = [cwd] -dep_prefix = '' - -# TODO: Figure out clean set of options. - -# BUG: does not handle \verb|\begin{verbatim}\end{verbatim}| correctly. 
-# Should make a joint RE for \verb and \begin, \end{verbatim} +include_path = [os.getcwd()] -# TODO: add an option to read the .ly files from a previous run and dump -# the .tex file, so you can do -# -# * mudela-book file.tex -# * convert-mudela *.ly -# * mudela-book --read-lys *.ly -# +g_dep_prefix = '' +g_outdir = '' +g_force_mudela_fontsize = 0 +g_read_lys = 0 +g_do_pictures = 1 +g_num_cols = 1 +format = '' +g_run_lilypond = 1 +no_match = 'a\ba' default_music_fontsize = 16 default_text_fontsize = 12 + +class LatexPaper: + def __init__(self): + self.m_paperdef = { + # the dimentions are from geometry.sty + 'a0paper': (mm2pt(841), mm2pt(1189)), + 'a1paper': (mm2pt(595), mm2pt(841)), + 'a2paper': (mm2pt(420), mm2pt(595)), + 'a3paper': (mm2pt(297), mm2pt(420)), + 'a4paper': (mm2pt(210), mm2pt(297)), + 'a5paper': (mm2pt(149), mm2pt(210)), + 'b0paper': (mm2pt(1000), mm2pt(1414)), + 'b1paper': (mm2pt(707), mm2pt(1000)), + 'b2paper': (mm2pt(500), mm2pt(707)), + 'b3paper': (mm2pt(353), mm2pt(500)), + 'b4paper': (mm2pt(250), mm2pt(353)), + 'b5paper': (mm2pt(176), mm2pt(250)), + 'letterpaper': (in2pt(8.5), in2pt(11)), + 'legalpaper': (in2pt(8.5), in2pt(14)), + 'executivepaper': (in2pt(7.25), in2pt(10.5))} + self.m_use_geometry = None + self.m_papersize = 'letterpaper' + self.m_fontsize = 10 + self.m_num_cols = 1 + self.m_landscape = 0 + self.m_geo_landscape = 0 + self.m_geo_width = None + self.m_geo_textwidth = None + self.m_geo_lmargin = None + self.m_geo_rmargin = None + self.m_geo_includemp = None + self.m_geo_marginparwidth = {10: 57, 11: 50, 12: 35} + self.m_geo_marginparsep = {10: 11, 11: 10, 12: 10} + self.m_geo_x_marginparwidth = None + self.m_geo_x_marginparsep = None + self.__body = None + def set_geo_option(self, name, value): + if name == 'body' or name == 'text': + if type(value) == type(""): + self._set_dimen('m_geo_textwidth', value) + else: + self._set_dimen('m_geo_textwidth', value[0]) + self.__body = 1 + elif name == 'portrait': + self.m_geo_landscape = 
0 + elif name == 'reversemp' or name == 'reversemarginpar': + if self.m_geo_includemp == None: + self.m_geo_includemp = 1 + elif name == 'marginparwidth' or name == 'marginpar': + self._set_dimen('m_geo_x_marginparwidth', value) + self.m_geo_includemp = 1 + elif name == 'marginparsep': + self._set_dimen('m_geo_x_marginparsep', value) + self.m_geo_includemp = 1 + elif name == 'scale': + if type(value) == type(""): + self.m_geo_width = self.get_paperwidth() * float(value) + else: + self.m_geo_width = self.get_paperwidth() * float(value[0]) + elif name == 'hscale': + self.m_geo_width = self.get_paperwidth() * float(value) + elif name == 'left' or name == 'lmargin': + self._set_dimen('m_geo_lmargin', value) + elif name == 'right' or name == 'rmargin': + self._set_dimen('m_geo_rmargin', value) + elif name == 'hdivide' or name == 'divide': + if value[0] not in ('*', ''): + self._set_dimen('m_geo_lmargin', value[0]) + if value[1] not in ('*', ''): + self._set_dimen('m_geo_width', value[1]) + if value[2] not in ('*', ''): + self._set_dimen('m_geo_rmargin', value[2]) + elif name == 'hmargin': + if type(value) == type(""): + self._set_dimen('m_geo_lmargin', value) + self._set_dimen('m_geo_rmargin', value) + else: + self._set_dimen('m_geo_lmargin', value[0]) + self._set_dimen('m_geo_rmargin', value[1]) + elif name == 'margin':#ugh there is a bug about this option in + # the geometry documentation + if type(value) == type(""): + self._set_dimen('m_geo_lmargin', value) + self._set_dimen('m_geo_rmargin', value) + else: + self._set_dimen('m_geo_lmargin', value[0]) + self._set_dimen('m_geo_rmargin', value[0]) + elif name == 'total': + if type(value) == type(""): + self._set_dimen('m_geo_width', value) + else: + self._set_dimen('m_geo_width', value[0]) + elif name == 'width' or name == 'totalwidth': + self._set_dimen('m_geo_width', value) + elif name == 'paper' or name == 'papername': + self.m_papersize = value + elif name[-5:] == 'paper': + self.m_papersize = name + else: + 
self._set_dimen('m_geo_'+name, value) + def _set_dimen(self, name, value): + if type(value) == type("") and value[-2:] == 'pt': + self.__dict__[name] = float(value[:-2]) + elif type(value) == type("") and value[-2:] == 'mm': + self.__dict__[name] = mm2pt(float(value[:-2])) + elif type(value) == type("") and value[-2:] == 'cm': + self.__dict__[name] = 10 * mm2pt(float(value[:-2])) + elif type(value) == type("") and value[-2:] == 'in': + self.__dict__[name] = in2pt(float(value[:-2])) + else: + self.__dict__[name] = value + def display(self): + print "LatexPaper:\n-----------" + for v in self.__dict__.keys(): + if v[:2] == 'm_': + print v, self.__dict__[v] + print "-----------" + def get_linewidth(self): + w = self._calc_linewidth() + if self.m_num_cols == 2: + return (w - 10) / 2 + else: + return w + def get_paperwidth(self): + #if self.m_use_geometry: + return self.m_paperdef[self.m_papersize][self.m_landscape or self.m_geo_landscape] + #return self.m_paperdef[self.m_papersize][self.m_landscape] + + def _calc_linewidth(self): + # since geometry sometimes ignores 'includemp', this is + # more complicated than it should be + mp = 0 + if self.m_geo_includemp: + if self.m_geo_x_marginparsep is not None: + mp = mp + self.m_geo_x_marginparsep + else: + mp = mp + self.m_geo_marginparsep[self.m_fontsize] + if self.m_geo_x_marginparwidth is not None: + mp = mp + self.m_geo_x_marginparwidth + else: + mp = mp + self.m_geo_marginparwidth[self.m_fontsize] + if self.__body:#ugh test if this is necessary + mp = 0 + def tNone(a, b, c): + return a == None, b == None, c == None + if not self.m_use_geometry: + return latex_linewidths[self.m_papersize][self.m_fontsize] + else: + if tNone(self.m_geo_lmargin, self.m_geo_width, + self.m_geo_rmargin) == (1, 1, 1): + if self.m_geo_textwidth: + return self.m_geo_textwidth + w = self.get_paperwidth() * 0.8 + return w - mp + elif tNone(self.m_geo_lmargin, self.m_geo_width, + self.m_geo_rmargin) == (0, 1, 1): + if self.m_geo_textwidth: + return 
self.m_geo_textwidth + return self.f1(self.m_geo_lmargin, mp) + elif tNone(self.m_geo_lmargin, self.m_geo_width, + self.m_geo_rmargin) == (1, 1, 0): + if self.m_geo_textwidth: + return self.m_geo_textwidth + return self.f1(self.m_geo_rmargin, mp) + elif tNone(self.m_geo_lmargin, self.m_geo_width, + self.m_geo_rmargin) \ + in ((0, 0, 1), (1, 0, 0), (1, 0, 1)): + if self.m_geo_textwidth: + return self.m_geo_textwidth + return self.m_geo_width - mp + elif tNone(self.m_geo_lmargin, self.m_geo_width, + self.m_geo_rmargin) in ((0, 1, 0), (0, 0, 0)): + w = self.get_paperwidth() - self.m_geo_lmargin - self.m_geo_rmargin - mp + if w < 0: + w = 0 + return w + raise "Never do this!" + def f1(self, m, mp): + tmp = self.get_paperwidth() - m * 2 - mp + if tmp < 0: + tmp = 0 + return tmp + def f2(self): + tmp = self.get_paperwidth() - self.m_geo_lmargin \ + - self.m_geo_rmargin + if tmp < 0: + return 0 + return tmp + +class TexiPaper: + def __init__(self): + self.m_papersize = 'a4' + self.m_fontsize = 12 + def get_linewidth(self): + return texi_linewidths[self.m_papersize][self.m_fontsize] + +def mm2pt(x): + return x * 2.8452756 +def in2pt(x): + return x * 72.26999 +def em2pt(x, fontsize): + return {10: 10.00002, 11: 10.8448, 12: 11.74988}[fontsize] * x +def ex2pt(x, fontsize): + return {10: 4.30554, 11: 4.7146, 12: 5.16667}[fontsize] * x + # latex linewidths: # indices are no. of columns, papersize, fontsize # Why can't this be calculated? 
latex_linewidths = { - 1: {'a4':{10: 345, 11: 360, 12: 390}, - 'a5':{10: 276, 11: 276, 12: 276}, - 'b5':{10: 345, 11: 356, 12: 356}, - 'letter':{10: 345, 11: 360, 12: 390}, - 'legal': {10: 345, 11: 360, 12: 390}, - 'executive':{10: 345, 11: 360, 12: 379}}, - 2: {'a4':{10: 167, 11: 175, 12: 190}, - 'a5':{10: 133, 11: 133, 12: 133}, - 'b5':{10: 167, 11: 173, 12: 173}, - 'letter':{10: 167, 11: 175, 12: 190}, - 'legal':{10: 167, 11: 175, 12: 190}, - 'executive':{10: 167, 11: 175, 12: 184}}} - + 'a4paper':{10: 345, 11: 360, 12: 390}, + 'a4paper-landscape': {10: 598, 11: 596, 12:592}, + 'a5paper':{10: 276, 11: 276, 12: 276}, + 'b5paper':{10: 345, 11: 356, 12: 356}, + 'letterpaper':{10: 345, 11: 360, 12: 390}, + 'letterpaper-landscape':{10: 598, 11: 596, 12:596}, + 'legalpaper': {10: 345, 11: 360, 12: 390}, + 'executivepaper':{10: 345, 11: 360, 12: 379}} + +texi_linewidths = { + 'a4': {12: 455}, + 'a4wide': {12: 470}, + 'smallbook': {12: 361}, + 'texidefault': {12: 433}} option_definitions = [ - ('DIM', '', 'default-mudela-fontsize', 'default fontsize for music. DIM is assumed to in points'), ('EXT', 'f', 'format', 'set format. EXT is one of texi and latex.'), - ('', 'h', 'help', 'print help'), + ('DIM', '', 'default-music-fontsize', 'default fontsize for music. DIM is assumed to be in points'), + ('DIM', '', 'default-mudela-fontsize', 'deprecated, use --default-music-fontsize'), + ('DIM', '', 'force-music-fontsize', 'force fontsize for all inline mudela. DIM is assumed be to in points'), + ('DIM', '', 'force-mudela-fontsize', 'deprecated, use --force-music-fontsize'), ('DIR', 'I', 'include', 'include path'), - ('', '', 'init', 'mudela-book initfile'), -# ('DIM', '', 'force-mudela-fontsize', 'force fontsize for all inline mudela. 
DIM is assumed to in points'), - ('', '', 'force-verbatim', 'make all mudela verbatim'), ('', 'M', 'dependencies', 'write dependencies'), + ('PREF', '', 'dep-prefix', 'prepend PREF before each -M dependency'), ('', 'n', 'no-lily', 'don\'t run lilypond'), ('', '', 'no-pictures', "don\'t generate pictures"), - ('FILE', 'o', 'outname', 'prefix for filenames'), + ('', '', 'read-lys', "don't write ly files."), + ('FILE', 'o', 'outname', 'filename main output file'), + ('FILE', '', 'outdir', "where to place generated files"), ('', 'v', 'version', 'print version information' ), - ('PREF', '', 'dep-prefix', 'prepend PREF before each -M dependency') + ('', 'h', 'help', 'print help'), ] -format = '' -run_lilypond = 1 -use_hash = 1 -no_match = 'a\ba' -do_pictures = 1 - # format specific strings, ie. regex-es for input, and % strings for output output_dict= { 'latex': { - 'output-mudela-fragment' : r"""\begin[eps,fragment%s]{mudela} + 'output-mudela-fragment' : r"""\begin[eps,singleline,%s]{mudela} \context Staff < \context Voice{ %s @@ -84,16 +308,22 @@ output_dict= { 'output-mudela':r"""\begin[%s]{mudela} %s \end{mudela}""", - 'output-verbatim': r"""\begin{verbatim}%s\end{verbatim}""", - 'output-default-post': r"""\def\postMudelaExample{}""", - 'output-default-pre': r"""\def\preMudelaExample{}""", - 'output-eps': '\\noindent\\parbox{\\mudelaepswidth{%s.eps}}{\includegraphics{%s.eps}}', - 'output-tex': '\\preMudelaExample \\input %s.tex \\postMudelaExample\n' + 'output-verbatim': "\\begin{verbatim}%s\\end{verbatim}", + 'output-default-post': "\\def\postMudelaExample{}\n", + 'output-default-pre': "\\def\preMudelaExample{}\n", + 'usepackage-graphics': '\\usepackage{graphics}\n', + 'output-eps': '\\noindent\\parbox{\\mudelaepswidth{%(fn)s.eps}}{\includegraphics{%(fn)s.eps}}', + 'output-tex': '\\preMudelaExample \\input %(fn)s.tex \\postMudelaExample\n', + 'pagebreak': r'\pagebreak', }, 'texi' : {'output-mudela': """@mudela[%s] %s -@end mudela +@end mudela """, + 
'output-mudela-fragment': """@mudela[%s] +\context Staff\context Voice{ %s } +@end mudela """, + 'pagebreak': None, 'output-verbatim': r"""@example %s @end example @@ -110,12 +340,12 @@ output_dict= { \catcode`\@=12 \input lilyponddefs \def\EndLilyPondOutput{} -\input %s.tex +\input %(fn)s.tex \catcode`\@=0 @end tex @html
-
+
@end html
""",
}
@@ -126,35 +356,44 @@ def output_verbatim (body):
body = re.sub ('([@{}])', '@\\1', body)
return get_output ('output-verbatim') % body
+
re_dict = {
- 'latex': {'input': '\\\\mbinput{?([^}\t \n}]*)',
- 'include': '\\\\mbinclude{([^}]+)}',
-
- 'comma-sep' : ', *',
- 'header': r"""\\documentclass(\[.*?\])?""",
- 'preamble-end': '\\\\begin{document}',
- 'verbatim': r"""(?s)\\begin{verbatim}(.*?)\\end{verbatim}""",
- 'verb': r"""\\verb(.)(.*?)\1""",
- 'mudela-file': '\\\\mudelafile(\[[^\\]]+\])?{([^}]+)}',
- 'mudela' : '\\\\mudela(\[.*?\])?{(.*?)}',
- 'mudela-block': r"""(?s)\\begin(\[.*?\])?{mudela}(.*?)\\end{mudela}""",
- 'interesting-cs': '\\\\(chapter|section|twocolumn|onecolumn)',
- 'def-post-re': r"""\\def\\postMudelaExample""",
- 'def-pre-re': r"""\\def\\preMudelaExample""",
+ 'latex': {'input': r'(?m)^[^%\n]*?(?P
\\begin{document})',
+ 'verbatim': r"(?s)(?P
\\begin{verbatim}.*?\\end{verbatim})",
+ 'verb': r"(?P
\\verb(?P
.).*?(?P=del))",
+ 'mudela-file': r'(?m)^[^%\n]*?(?P.*?)})',
+ 'mudela-block': r"(?sm)^[^%\n]*?(?P
.*?)\\end{mudela})",
+ 'def-post-re': r"\\def\\postMudelaExample",
+ 'def-pre-re': r"\\def\\preMudelaExample",
+ 'usepackage-graphics': r"\usepackage{graphics}",
+ 'intertext': r',?\s*intertext=\".*?\"',
+ 'multiline-comment': no_match,
+ 'singleline-comment': r"(?m)^.*?(?P
^%.*$\n+))",
+ 'numcols': r"(?P
\\(?P
@example\s.*?@end example\s)""",
+ 'verb': r"""(?P
@code{.*?})""",
+ 'mudela-file': '(?m)^(?!@c)(?P
.*?)})',
+ 'mudela-block': r"""(?m)^(?!@c)(?P
.*?)@end mudela\s))""",
+ 'option-sep' : ', *',
+ 'intertext': r',?\s*intertext=\".*?\"',
+ 'multiline-comment': r"(?sm)^\s*(?!@c\s+)(?P
@ignore\s.*?@end ignore)\s",
+ 'singleline-comment': r"(?m)^.*?(?P
@c.*$\n+))",
+ 'numcols': no_match,
}
}
@@ -183,7 +422,6 @@ def get_output (name):
def get_re (name):
return re_dict[format][name]
-
def bounding_box_dimensions(fname):
try:
fd = open(fname)
@@ -198,332 +436,301 @@ def bounding_box_dimensions(fname):
return (0,0)
-
-read_files = []
-def find_file (name):
- f = None
- for a in include_path:
- try:
- nm = os.path.join (a, name)
- f = open (nm)
- __main__.read_files.append (nm)
- break
- except IOError:
- pass
-
-
- if f:
- return f.read ()
- else:
- error ("File not found `%s'\n" % name)
- return ''
-
def error (str):
sys.stderr.write (str + "\n Exiting ... \n\n")
raise 'Exiting.'
def compose_full_body (body, opts):
- "Construct the text of an input file: add stuff to BODY using OPTS as options."
- paper = 'a4'
+ """Construct the mudela code to send to Lilypond.
+ Add stuff to BODY using OPTS as options."""
music_size = default_music_fontsize
latex_size = default_text_fontsize
-
- cols = 1
for o in opts:
- m = re.search ('^(.*)paper$', o)
- if m:
- paper = m.group (1)
-
-
- m = re.match ('([0-9]+)pt', o)
- if m:
- music_size = string.atoi(m.group (1))
+ if g_force_mudela_fontsize:
+ music_size = g_force_mudela_fontsize
+ else:
+ m = re.match ('([0-9]+)pt', o)
+ if m:
+ music_size = string.atoi(m.group (1))
m = re.match ('latexfontsize=([0-9]+)pt', o)
if m:
latex_size = string.atoi (m.group (1))
+ if re.search ('\\\\score', body):
+ is_fragment = 0
+ else:
+ is_fragment = 1
+ if 'fragment' in opts:
+ is_fragment = 1
+ if 'nonfragment' in opts:
+ is_fragment = 0
-
- if 'twocolumn' in opts:
- cols = 2
-
-
- # urg: breaks on \include of full score
- # Use nofly option if you want to \include full score.
- if 'nofly' not in opts and not re.search ('\\\\score', body):
- opts.append ('fragment')
-
- if 'fragment' in opts and 'nosingleline' not in opts:
- opts.append ('singleline')
-
+ if is_fragment and not 'multiline' in opts:
+ opts.append('singleline')
if 'singleline' in opts:
l = -1.0;
else:
- l = latex_linewidths[cols][paper][latex_size]
-
-
- if 'relative' in opts:
+ l = paperguru.get_linewidth()
+
+ if 'relative' in opts:#ugh only when is_fragment
body = '\\relative c { %s }' % body
-
- if 'fragment' in opts:
+ if is_fragment:
body = r"""\score {
- \notes { %s }
+ \notes { %s }
\paper { }
}""" % body
- opts = uniq (opts)
- optstring = string.join (opts, ' ')
+ opts = uniq (opts)
+ optstring = string.join (opts, ' ')
optstring = re.sub ('\n', ' ', optstring)
-
body = r"""
-%% Generated by mudela-book.py; options are %s
+%% Generated by mudela-book.py; options are %s %%ughUGH not original options
\include "paper%d.ly"
\paper { linewidth = %f \pt; }
""" % (optstring, music_size, l) + body
-
return body
-def find_inclusion_chunks (regex, surround, str):
- chunks = []
- while str:
- m = regex.search (str)
+def parse_options_string(s):
+ d = {}
+ r1 = re.compile("((\w+)={(.*?)})((,\s*)|$)")
+ r2 = re.compile("((\w+)=(.*?))((,\s*)|$)")
+ r3 = re.compile("(\w+?)((,\s*)|$)")
+ while s:
+ m = r1.match(s)
+ if m:
+ s = s[m.end():]
+ d[m.group(2)] = re.split(",\s*", m.group(3))
+ continue
+ m = r2.match(s)
+ if m:
+ s = s[m.end():]
+ d[m.group(2)] = m.group(3)
+ continue
+ m = r3.match(s)
+ if m:
+ s = s[m.end():]
+ d[m.group(1)] = 1
+ continue
+ print "trøbbel:%s:" % s
+ return d
+
+def scan_latex_preamble(chunks):
+ # first we want to scan the \documentclass line
+ # it should be the first non-comment line
+ idx = 0
+ while 1:
+ if chunks[idx][0] == 'ignore':
+ idx = idx + 1
+ continue
+ m = get_re ('header').match(chunks[idx][1])
+ options = re.split (',[\n \t]*', m.group(1)[1:-1])
+ for o in options:
+ if o == 'landscape':
+ paperguru.m_landscape = 1
+ m = re.match("(.*?)paper", o)
+ if m:
+ paperguru.m_papersize = m.group()
+ else:
+ m = re.match("(\d\d)pt", o)
+ if m:
+ paperguru.m_fontsize = int(m.group(1))
+
+ break
+ while chunks[idx][0] != 'preamble-end':
+ if chunks[idx] == 'ignore':
+ idx = idx + 1
+ continue
+ m = get_re ('geometry').search(chunks[idx][1])
+ if m:
+ paperguru.m_use_geometry = 1
+ o = parse_options_string(m.group('options'))
+ for k in o.keys():
+ paperguru.set_geo_option(k, o[k])
+ idx = idx + 1
+
+def scan_texi_preamble (chunks):
+ # this is not bulletproof..., it checks the first 10 chunks
+ idx = 0
+ while 1:
+ if chunks[idx][0] == 'input':
+ if string.find(chunks[idx][1], "@afourpaper") != -1:
+ paperguru.m_papersize = 'a4'
+ elif string.find(chunks[idx][1], "@afourwide") != -1:
+ paperguru.m_papersize = 'a4wide'
+ elif string.find(chunks[idx][1], "@smallbook") != -1:
+ paperguru.m_papersize = 'smallbook'
+ idx = idx + 1
+ if idx == 10 or idx == len(chunks):
+ break
+
+def scan_preamble (chunks):
+ if __main__.format == 'texi':
+ scan_texi_preamble(chunks)
+ else:
+ assert __main__.format == 'latex'
+ scan_latex_preamble(chunks)
+
- if m == None:
- chunks.append (('input', str))
- str = ''
+def completize_preamble (chunks):
+ if __main__.format == 'texi':
+ return chunks
+ pre_b = post_b = graphics_b = None
+ for chunk in chunks:
+ if chunk[0] == 'preamble-end':
break
+ if chunk[0] == 'input':
+ m = get_re('def-pre-re').search(chunk[1])
+ if m:
+ pre_b = 1
+ if chunk[0] == 'input':
+ m = get_re('def-post-re').search(chunk[1])
+ if m:
+ post_b = 1
+ if chunk[0] == 'input':
+ m = get_re('usepackage-graphics').search(chunk[1])
+ if m:
+ graphics_b = 1
+ x = 0
+ while chunks[x][0] != 'preamble-end':
+ x = x + 1
+ if not pre_b:
+ chunks.insert(x, ('input', get_output ('output-default-pre')))
+ if not post_b:
+ chunks.insert(x, ('input', get_output ('output-default-post')))
+ if not graphics_b:
+ chunks.insert(x, ('input', get_output ('usepackage-graphics')))
+ return chunks
- chunks.append (('input', str[: m.start (0)]))
- chunks.append (('input', surround))
- chunks = chunks + read_doc_file (m.group (1))
- chunks.append (('input', surround))
+read_files = []
+def find_file (name):
+ f = None
+ for a in include_path:
+ try:
+ nm = os.path.join (a, name)
+ f = open (nm)
+ __main__.read_files.append (nm)
+ break
+ except IOError:
+ pass
+ if f:
+ return f.read ()
+ else:
+ error ("File not found `%s'\n" % name)
+ return ''
- str = str [m.end (0):]
- return chunks
+def do_ignore(match_object):
+ return [('ignore', match_object.group('code'))]
+def do_preamble_end(match_object):
+ return [('preamble-end', match_object.group('code'))]
-def find_include_chunks (str):
- return find_inclusion_chunks (get_re ('include'), '\\newpage', str)
+def make_verbatim(match_object):
+ return [('verbatim', match_object.group('code'))]
-def find_input_chunks (str):
- return find_inclusion_chunks (get_re ('input'), '', str)
+def make_verb(match_object):
+ return [('verb', match_object.group('code'))]
+
+def do_include_file(m):
+ "m: MatchObject"
+ return [('input', get_output ('pagebreak'))] \
+ + read_doc_file(m.group('filename')) \
+ + [('input', get_output ('pagebreak'))]
+
+def do_input_file(m):
+ return read_doc_file(m.group('filename'))
+
+def make_mudela(m):
+ if m.group('options'):
+ options = m.group('options')
+ else:
+ options = ''
+ return [('input', get_output('output-mudela-fragment') %
+ (options, m.group('code')))]
+
+def make_mudela_file(m):
+ if m.group('options'):
+ options = m.group('options')
+ else:
+ options = ''
+ return [('input', get_output('output-mudela') %
+ (options, find_file(m.group('filename'))))]
+
+def make_mudela_block(m):
+ if m.group('options'):
+ options = get_re('option-sep').split (m.group('options'))
+ else:
+ options = []
+ options = filter(lambda s: s != '', options)
+ return [('mudela', m.group('code'), options)]
+
+def do_columns(m):
+ if __main__.format != 'latex':
+ return []
+ if m.group('num') == 'one':
+ return [('numcols', m.group('code'), 1)]
+ if m.group('num') == 'two':
+ return [('numcols', m.group('code'), 2)]
+
+def chop_chunks(chunks, re_name, func, use_match=0):
+ newchunks = []
+ for c in chunks:
+ if c[0] == 'input':
+ str = c[1]
+ while str:
+ m = get_re (re_name).search (str)
+ if m == None:
+ newchunks.append (('input', str))
+ str = ''
+ else:
+ if use_match:
+ newchunks.append (('input', str[:m.start ('match')]))
+ else:
+ newchunks.append (('input', str[:m.start (0)]))
+ #newchunks.extend(func(m))
+ # python 1.5 compatible:
+ newchunks = newchunks + func(m)
+ str = str [m.end(0):]
+ else:
+ newchunks.append(c)
+ return newchunks
def read_doc_file (filename):
- """Read the input file, substituting for \input, \include, \mudela{} and \mudelafile"""
+ """Read the input file, find verbatim chunks and do \input and \include
+ """
str = ''
str = find_file(filename)
if __main__.format == '':
latex = re.search ('\\\\document', str[:200])
- texinfo = re.search ('@node', str[:200])
+ texinfo = re.search ('@node|@setfilename', str[:200])
if (texinfo and latex) or not (texinfo or latex):
error("error: can't determine format, please specify")
if texinfo:
__main__.format = 'texi'
else:
__main__.format = 'latex'
-
+ if __main__.format == 'texi':
+ __main__.paperguru = TexiPaper()
+ else:
+ __main__.paperguru = LatexPaper()
chunks = [('input', str)]
-
- for func in (find_verbatim_chunks, find_verb_chunks, find_include_chunks, find_input_chunks):
- newchunks = []
- for c in chunks:
- if c[0] == 'input':
- newchunks = newchunks + func (c[1])
- else:
- newchunks.append (c)
- chunks = newchunks
-
- return chunks
-
-
-
-def scan_preamble (str):
- options = []
- m = get_re ('header').search( str)
-
- # should extract paper & fontsz.
- if m and m.group (1):
- options = options + re.split (',[\n \t]*', m.group(1)[1:-1])
-
- def verbose_fontsize ( x):
- # o ??
- #if o.match('[0-9]+pt'):
- if re.match('[0-9]+pt', x):
- return 'latexfontsize=' + x
- else:
- return x
-
- options = map (verbose_fontsize, options)
-
- return options
-
-
-def completize_preamble (str):
- m = get_re ('preamble-end').search( str)
- if not m:
- return str
-
- preamble = str [:m.start (0)]
- str = str [m.start(0):]
-
- if not get_re('def-post-re').search (preamble):
- preamble = preamble + get_output('output-default-post')
- if not get_re ('def-pre-re').search( preamble):
- preamble = preamble + get_output ('output-default-pre')
-
- # UGH ! BUG!
- #if re.search ('\\\\includegraphics', str) and not re.search ('usepackage{graphics}',str):
-
- preamble = preamble + '\\usepackage{graphics}\n'
-
- return preamble + str
-
-def find_verbatim_chunks (str):
- """Chop STR into a list of tagged chunks, ie. tuples of form
- (TYPE_STR, CONTENT_STR), where TYPE_STR is one of 'input' and 'verbatim'
-
- """
-
- chunks = []
- while str:
- m = get_re ('verbatim').search( str)
- if m == None:
- chunks.append( ('input', str))
- str = ''
- else:
- chunks.append (('input', str[:m.start (0)]))
- chunks.append (('verbatim', m.group (0)))
-
- str = str [m.end(0):]
-
- return chunks
-
-def find_verb_chunks (str):
-
- chunks = []
- while str:
- m = get_re ("verb").search(str)
- if m == None:
- chunks.append (('input', str))
- str = ''
- else:
- chunks.append (('input', str[:m.start (0)]))
- chunks.append (('verbatim', m.group (0)))
- str = str [m.end(0):]
-
+ # we have to check for verbatim before doing include,
+ # because we don't want to include files that are mentioned
+ # inside a verbatim environment
+ chunks = chop_chunks(chunks, 'verbatim', make_verbatim)
+ chunks = chop_chunks(chunks, 'verb', make_verb)
+ chunks = chop_chunks(chunks, 'multiline-comment', do_ignore)
+ #ugh fix input
+ chunks = chop_chunks(chunks, 'include', do_include_file, 1)
+ chunks = chop_chunks(chunks, 'input', do_input_file, 1)
return chunks
-
-def find_mudela_shorthand_chunks (str):
- return [('input', find_mudela_shorthands(str))]
-
-def find_mudela_shorthands (b):
- def mudela_short (match):
- "Find \mudela{}, and substitute appropriate \begin / \end blocks."
- opts = match.group (1)
- if opts:
- opts = ',' + opts[1:-1]
- else:
- opts = ''
- return get_output ('output-mudela-fragment') % (opts, match.group (2))
-
- def mudela_file (match):
- "Find \mudelafile, and substitute appropriate \begin / \end blocks."
- fn = match.group (2)
- str = find_file (fn)
- opts = match.group (1)
- if opts:
- opts = opts[1:-1]
- opts = re.split (',[ \n\t]*', opts)
- else:
- opts = []
-
- if re.search ('.fly$', fn):
- opts.append ('fly')
- elif re.search ('.sly$', fn):
- opts = opts + [ 'fly','fragment']
- elif re.search ('.ly$', fn):
- opts .append ('nofly')
-
- str_opts = string.join (opts, ',')
-
- str = ("%% copied from file `%s'\n" % fn) + str
- return get_output ('output-mudela') % (str_opts, str)
-
- b = get_re('mudela-file').sub (mudela_file, b)
- b = get_re('mudela').sub (mudela_short, b)
- return b
-
-def find_mudela_chunks (str):
- """Find mudela blocks, while watching for verbatim. Returns
- (STR,MUDS) with substituted for the blocks in STR,
- and the blocks themselves MUDS"""
-
- chunks = []
- while str:
- m = get_re ("mudela-block").search( str)
- if not m:
- chunks.append (('input', str))
- str = ''
- break
-
- chunks.append (('input', str[:m.start (0)]))
-
- opts = m.group (1)
- if opts:
- opts = opts[1:-1]
- else:
- opts = ''
- optlist = get_re('comma-sep').split (opts)
-
- body = m.group (2)
- chunks.append (('mudela', body, optlist))
-
- str = str [m.end (0):]
-
- return chunks
-
-
-
-def advance_counters (counter, opts, str):
- """Advance chap/sect counters,
- revise OPTS. Return the new counter tuple"""
-
- (chapter, section, count) = counter
- done = ''
- while str:
- m = get_re ('interesting-cs').search(str)
- if not m:
- done = done + str
- str = ''
- break
-
- done = done + str[:m.end (0)]
- str = str[m.end(0):]
- g = m.group (1)
-
- if g == 'twocolumn':
- opts.append ('twocolumn')
- elif g == 'onecolumn':
- try:
- opts.remove ('twocolumn')
- except IndexError:
- pass
- elif g == 'chapter':
- (chapter, section, count) = (chapter + 1, 0, 0)
- elif g == 'section':
- (section, count) = (section + 1, 0)
-
-
- return (chapter, section, count)
-
-
-def schedule_mudela_block (base, chunk, extra_opts):
+taken_file_names = {}
+def schedule_mudela_block (chunk):
"""Take the body and options from CHUNK, figure out how the
real .ly should look, and what should be left MAIN_STR (meant
for the main file). The .ly is written, and scheduled in
@@ -534,131 +741,85 @@ def schedule_mudela_block (base, chunk, extra_opts):
TODO has format [basename, extension, extension, ... ]
"""
-
(type, body, opts) = chunk
assert type == 'mudela'
- opts = opts + extra_opts
-
+ file_body = compose_full_body (body, opts)
+ basename = `abs(hash (file_body))`
+ for o in opts:
+ m = re.search ('filename="(.*?)"', o)
+ if m:
+ basename = m.group (1)
+ if not taken_file_names.has_key(basename):
+ taken_file_names[basename] = 0
+ else:
+ taken_file_names[basename] = taken_file_names[basename] + 1
+ basename = basename + "-%i" % taken_file_names[basename]
+ if not g_read_lys:
+ update_file(file_body, os.path.join(g_outdir, basename) + '.ly')
+ needed_filetypes = ['tex']
+
+ if format == 'texi':
+ needed_filetypes.append('eps')
+ needed_filetypes.append('png')
+ if 'eps' in opts and not ('eps' in needed_filetypes):
+ needed_filetypes.append('eps')
+ outname = os.path.join(g_outdir, basename)
+ def f(base, ext1, ext2):
+ a = os.path.isfile(base + ext2)
+ if (os.path.isfile(base + ext1) and
+ os.path.isfile(base + ext2) and
+ os.stat(base+ext1)[stat.ST_MTIME] >
+ os.stat(base+ext2)[stat.ST_MTIME]) or \
+ not os.path.isfile(base + ext2):
+ return 1
+ todo = []
+ if 'tex' in needed_filetypes and f(outname, '.ly', '.tex'):
+ todo.append('tex')
+ if 'eps' in needed_filetypes and f(outname, '.tex', '.eps'):
+ todo.append('eps')
+ if 'png' in needed_filetypes and f(outname, '.eps', '.png'):
+ todo.append('png')
newbody = ''
if 'verbatim' in opts:
newbody = output_verbatim (body)
- file_body = compose_full_body (body, opts)
- basename = base
- if __main__.use_hash:
- basename = `abs(hash (file_body))`
- updated = update_file (file_body, basename + '.ly')
- todo = [basename] # UGH.
-
- if not os.path.isfile (basename + '.tex') or updated:
- todo.append ('tex')
- updated = 1
-
for o in opts:
m = re.search ('intertext="(.*?)"', o)
if m:
- newbody = newbody + m.group (1)
-
- if format == 'texi':
- opts.append ('png')
-
- if 'png' in opts:
- opts.append ('eps')
-
- if 'eps' in opts and ('tex' in todo or
- not os.path.isfile (basename + '.eps')):
- todo.append ('eps')
-
- if 'png' in opts and ('eps' in todo or
- not os.path.isfile (basename + '.png')):
- todo.append ('png')
-
+ newbody = newbody + m.group (1) + "\n\n"
if format == 'latex':
- if 'eps' in opts :
- newbody = newbody + get_output ('output-eps') % (basename, basename)
+ if 'eps' in opts:
+ s = 'output-eps'
else:
- newbody = newbody + get_output ('output-tex') % basename
-
- elif format == 'texi':
- newbody = newbody + get_output ('output-all') % (basename, basename)
+ s = 'output-tex'
+ else: # format == 'texi'
+ s = 'output-all'
+ newbody = newbody + get_output(s) % {'fn': basename }
+ return ('mudela', newbody, opts, todo, basename)
- return ('mudela', newbody, opts, todo, base)
+def process_mudela_blocks(outname, chunks):#ugh rename
+ newchunks = []
+ # Count sections/chapters.
+ for c in chunks:
+ if c[0] == 'mudela':
+ c = schedule_mudela_block (c)
+ elif c[0] == 'numcols':
+ paperguru.m_num_cols = c[2]
+ newchunks.append (c)
+ return newchunks
def find_eps_dims (match):
	"Fill in dimensions of EPS files."
	# Regex substitution callback: group(1) is the EPS file name.
	eps_name = match.group(1)
	# When an output directory is configured, the EPS lives there.
	if g_outdir:
		eps_name = os.path.join(g_outdir, eps_name)
	width = bounding_box_dimensions(eps_name)[0]
	return '%ipt' % width
-def print_chunks (ch):
- for c in ch:
- print '-->%s\n%s' % (c[0], c[1])
- if len (c) > 2:
- print '==>%s' % list (c[2:])
- print foo
-
-
-def transform_input_file (in_filename, out_filename):
- """Read the input, and deliver a list of chunks
- ready for writing.
-
- """
-
- chunks = read_doc_file (in_filename)
-
- #. Process \mudela and \mudelafile.
- for func in [find_mudela_shorthand_chunks,
- find_mudela_chunks]:
- newchunks = []
- for c in chunks:
- if c[0] == 'input':
- newchunks = newchunks + func (c[1])
- else:
- newchunks.append (c)
- chunks = newchunks
-
- opts = []
- if chunks:
- opts = scan_preamble (chunks[0][1])
-
- (chap,sect,count) = (0,0,0)
- newchunks = []
- # Count sections/chapters.
- for c in chunks:
- if c[0] == 'input':
- (chap,sect,count) = advance_counters((chap,sect,count), opts, c[1])
- elif c[0] == 'mudela':
- base = '%s-%d.%d.%d' % (out_filename, chap, sect, count)
- count = count + 1
- c = schedule_mudela_block (base, c, opts)
-
- newchunks.append (c)
-
- chunks = newchunks
- newchunks = []
-
- # Do It.
- if __main__.run_lilypond:
- compile_all_files (chunks)
-
- # finishing touch.
- for c in chunks:
- if c[0] == 'mudela' and 'eps' in c[2]:
- body = re.sub (r"""\\mudelaepswidth{(.*?)}""", find_eps_dims, c[1])
- newchunks.append (('mudela', body))
- else:
- newchunks.append (c)
- chunks = newchunks
-
- if chunks and chunks[0][0] == 'input':
- chunks[0] = ('input', completize_preamble (chunks[0][1]))
-
- return chunks
-
def system (cmd):
sys.stderr.write ("invoking `%s'\n" % cmd)
st = os.system (cmd)
@@ -670,64 +831,44 @@ def compile_all_files (chunks):
eps = []
tex = []
png = []
- hash_dict = {}
for c in chunks:
if c[0] <> 'mudela':
continue
- base = c[3][0]
- exts = c[3][1:]
+ base = c[4]
+ exts = c[3]
for e in exts:
if e == 'eps':
eps.append (base)
elif e == 'tex':
- tex.append (base + '.ly')
- elif e == 'png' and do_pictures:
+ #ugh
+ if base + '.ly' not in tex:
+ tex.append (base + '.ly')
+ elif e == 'png' and g_do_pictures:
png.append (base)
-
- if __main__.use_hash:
- hash_dict[c[4]] = c[3][0]
-
+ d = os.getcwd()
+ if g_outdir:
+ os.chdir(g_outdir)
if tex:
lilyopts = map (lambda x: '-I ' + x, include_path)
lilyopts = string.join (lilyopts, ' ' )
texfiles = string.join (tex, ' ')
system ('lilypond %s %s' % (lilyopts, texfiles))
-
for e in eps:
- if os.environ.has_key('OS') and \
- os.environ['OS'] == 'Windows_95':
- cmd = r"""ash -c 'tex " \nonstopmode \input %s " ; dvips -E -o %s %s ' """ % \
- (e, e + '.eps', e)
- else:
- cmd = r"""tex '\nonstopmode \input %s' ; dvips -E -o %s %s""" % \
- (e, e + '.eps', e)
- system (cmd)
-
+ system(r"tex '\nonstopmode \input %s'" % e)
+ system(r"dvips -E -o %s %s" % (e + '.eps', e))
for g in png:
cmd = r"""gs -sDEVICE=pgm -dTextAlphaBits=4 -dGraphicsAlphaBits=4 -q -sOutputFile=- -r90 -dNOPAUSE %s -c quit | pnmcrop | pnmtopng > %s"""
-
cmd = cmd % (g + '.eps', g + '.png')
system (cmd)
+ if g_outdir:
+ os.chdir(d)
- if __main__.use_hash:
- name = ''
- last_name = ''
- f = 0
- ks = hash_dict.keys ()
- ks.sort ()
- for i in ks:
- name = re.sub ("(.*)-[0-9]+\.[0-9]+\.[0-9]+", "\\1", i)
- name = name + '.mix'
- if name != last_name:
- if last_name:
- f.close ()
- f = open (name, 'w')
- last_name = name
- f.write ("%s:%s\n" % (i, hash_dict[i]))
-
def update_file (body, name):
+ """
+ write the body if it has changed
+ """
same = 0
try:
f = open (name)
@@ -740,12 +881,10 @@ def update_file (body, name):
f = open (name , 'w')
f.write (body)
f.close ()
-
return not same
-
def getopt_args (opts):
"Construct arguments (LONG, SHORT) for getopt from list of options."
short = ''
@@ -813,7 +952,7 @@ Options:
Report bugs to bug-gnu-music@gnu.org.
-Written by Tom Cato Amundsen