import glob
import re
-USAGE = ''' Usage: makelsr.py LSR_SNIPPETS_DIR
-This script must be run from top of the source tree;
-it updates snippets input/lsr with snippets in input/new or LSR_SNIPPETS_DIR.
-'''
+# langdefs (in python/) enumerates the documentation languages; used below
+# to build one texidocs directory per language.
+# NOTE(review): assumes 'import sys' and 'import os' appear above this chunk.
+sys.path.append ('python')
+import langdefs
-LY_HEADER_LSR = '''%% Do not edit this file; it is auto-generated from LSR http://lsr.dsi.unimi.it
-%% This file is in the public domain.
-'''
+# Snippet tree moved from input/ to Documentation/snippets; TEXIDOCS is now
+# a list of per-language translation directories instead of a single path.
+DEST = os.path.join ('Documentation', 'snippets')
+NEW_LYS = os.path.join ('Documentation', 'snippets', 'new')
+TEXIDOCS = [os.path.join ('Documentation', language_code, 'texidocs')
+            for language_code in langdefs.LANGDICT]
-LY_HEADER_NEW = '''%% Do not edit this file; it is auto-generated from input/new
+# LSR_SNIPPETS_DIR is now optional (note the brackets); %(DEST)s and
+# %(NEW_LYS)s placeholders are filled from the module globals via vars ().
+USAGE = ''' Usage: makelsr.py [LSR_SNIPPETS_DIR]
+This script must be run from top of the source tree;
+it updates snippets %(DEST)s with snippets
+from %(NEW_LYS)s or LSR_SNIPPETS_DIR.
+If a snippet is present in both directories, the one
+from %(NEW_LYS)s is preferred.
+''' % vars ()
+
+# Header prepended to snippets imported from the LSR database.
+LY_HEADER_LSR = '''%% DO NOT EDIT this file manually; it is automatically
+%% generated from LSR http://lsr.dsi.unimi.it
+%% Make any changes in LSR itself, or in Documentation/snippets/new/ ,
+%% and then run scripts/auxiliar/makelsr.py
+%%
%% This file is in the public domain.
'''
-DEST = os.path.join ('input', 'lsr')
-NEW_LYS = os.path.join ('input', 'new')
-TEXIDOCS = os.path.join ('input', 'texidocs')
+# Header prepended to snippets taken from Documentation/snippets/new.
+LY_HEADER_NEW = '''%% DO NOT EDIT this file manually; it is automatically
+%% generated from %s
+%% Make any changes in Documentation/snippets/new/
+%% and then run scripts/auxiliar/makelsr.py
+%%
+%% This file is in the public domain.
+''' % NEW_LYS
TAGS = []
# NR 1
'paper-and-layout', 'breaks', 'spacing', 'midi', 'titles', 'template'])
def exit_with_usage (n=0):
-    sys.stderr.write (USAGE)
-    sys.exit (n)
-
-try:
-    in_dir = sys.argv[1]
-except:
-    exit_with_usage (2)
-
-if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)):
-    exit_with_usage (3)
+    sys.stderr.write (USAGE)
+    sys.exit (n)
+
+# The LSR snippets dir argument is now optional: without it, in_dir stays
+# '' and only snippets from NEW_LYS are regenerated (see driver at bottom).
+if len (sys.argv) >= 2:
+    in_dir = sys.argv[1]
+    if len (sys.argv) >= 3:
+        exit_with_usage (2)
+    if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)):
+        exit_with_usage (3)
+else:
+    in_dir = ''
+
+# which convert-ly to use
+# Prefer binaries from an in-tree build (out/bin or build/out/bin); fall
+# back to convert-ly/lilypond found on PATH.
+if os.path.isfile("out/bin/convert-ly"):
+    conv_path='out/bin/'
+elif os.path.isfile("build/out/bin/convert-ly"):
+    conv_path='build/out/bin/'
+else:
+    conv_path=''
+convert_ly=conv_path+'convert-ly'
+lilypond_bin=conv_path+'lilypond'
+
+# Python 2 print statement -- this script is Python 2 throughout.
+print 'using '+convert_ly
unsafe = []
unconverted = []
# mark the section that will be printed verbatim by lilypond-book
end_header_re = re.compile ('(\\header {.+?doctitle = ".+?})\n', re.M | re.S)
+# doctitle fields (including translated doctitleXX variants, hence the
+# [a-zA-Z_]{0,6} suffix) end up as Texinfo node names, so doctitle_sub
+# strips commas and rewrites @q{}/@qq{} quoting macros as plain quotes;
+# inner double quotes stay backslash-escaped inside the .ly string value.
+doctitle_re = re.compile (r'(doctitle[a-zA-Z_]{0,6}\s*=\s*")((?:\\"|[^"\n])*)"')
+texinfo_q_re = re.compile (r'@q{(.*?)}')
+texinfo_qq_re = re.compile (r'@qq{(.*?)}')
+def doctitle_sub (title_match):
+    # Comma forbidden in Texinfo node name
+    title = title_match.group (2).replace (',', '')
+    title = texinfo_q_re.sub (r"`\1'", title)
+    title = texinfo_qq_re.sub (r'\"\1\"', title)
+    return title_match.group (1) + title + '"'
+
def mark_verbatim_section (ly_code):
-    return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1)
+    return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1)
# '% LSR' comments are to be stripped
lsr_comment_re = re.compile (r'\s*%+\s*LSR.*')
-
begin_header_re = re.compile (r'\\version\s*"(.+?)"') and trailing-whitespace stripping are new helpers:
# add tags to ly files from LSR
def add_tags (ly_code, tags):
-    return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n', ly_code, 1)
+    return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n',
+                                ly_code, 1)
+
+# for snippets from input/new, add message for earliest working version
+# NOTE(review): returns only the generated comment line; the caller
+# (copy_ly) concatenates it with the snippet text.  Assumes the snippet
+# contains a \version statement -- .search () is used without a None check;
+# verify against the snippets in Documentation/snippets/new.
+def add_version (ly_code):
+    return '''%% Note: this file works from version ''' + \
+        ly_new_version_re.search (ly_code).group (1) + '\n'
+
+# One translated texidoc/doctitle chunk in a snippet header, optionally
+# preceded by its '% Translation of GIT committish' comment; group 1 is
+# the 2-4 letter language-code suffix of the field name.
+s = 'Translation of GIT [Cc]ommittish'
+texidoc_chunk_re = re.compile (r'^(?:%+\s*' + s + \
+    r'.+)?\s*(?:texidoc|doctitle)([a-zA-Z]{2,4})\s+=(?:.|\n)*?(?=%+\s*' + \
+    s + r'|\n\} % begin verbatim|\n (?:doctitle|texidoc|lsrtags) |$(?!.|\n))', re.M)
+
+def update_translated_texidoc (m, snippet_path, visited_languages):
+    # Substitution callback for texidoc_chunk_re: replace the matched
+    # chunk with the current contents of
+    # Documentation/<lang>/texidocs/<base>.texidoc when that file exists,
+    # otherwise keep the chunk unchanged.  The language code is appended
+    # to visited_languages in both cases.
+    base = os.path.splitext (os.path.basename (snippet_path))[0]
+    language_code = m.group (1)
+    visited_languages.append (language_code)
+    texidoc_path = os.path.join ('Documentation', language_code,
+                                 'texidocs', base + '.texidoc')
+    if os.path.isfile (texidoc_path):
+        return open (texidoc_path).read ()
+    else:
+        return m.group (0)
+
+def escape_backslashes_in_header(snippet):
+    # Double the backslash of @code{\foo} commands inside the snippet
+    # header so texinfo processing keeps the backslash intact.
+    # NOTE(review): '\header' relies on \h not being a recognized Python
+    # escape, so the string contains a literal backslash -- confirm this
+    # stays true if the file is ever ported to Python 3 warnings-clean.
+    # ASSUME: the \header exists.
+    header_char_number_start = snippet.find('\header {')
+    header_char_number_end = snippet.find('} % begin verbatim')
+
+    header = snippet[header_char_number_start:header_char_number_end]
+    # two levels of escaping happening here -- 4\ means 1\
+    # and the 10\ means two \ backslashes (that's 8\ ), and
+    # one backreference to group 1 (that's two 2\ ).
+    new_header = re.sub("@code\{\\\\([a-zA-Z])", "@code{\\\\\\\\\\1", header)
+    escaped_snippet = (snippet[:header_char_number_start] +
+                       new_header + snippet[header_char_number_end:])
+    return escaped_snippet
def copy_ly (srcdir, name, tags):
-    global unsafe
-    global unconverted
-    dest = os.path.join (DEST, name)
-    tags = ', '.join (tags)
-    s = open (os.path.join (srcdir, name)).read ()
-
-    texidoc_translations_path = os.path.join (TEXIDOCS,
-        os.path.splitext (name)[0] + '.texidoc')
-    if os.path.exists (texidoc_translations_path):
-        texidoc_translations = open (texidoc_translations_path).read ()
-        # Since we want to insert the translations verbatim using a
-        # regexp, \\ is understood as ONE escaped backslash. So we have
-        # to escape those backslashes once more...
-        texidoc_translations = texidoc_translations.replace ('\\', '\\\\')
-        s = begin_header_re.sub ('\\g<0>\n' + texidoc_translations, s, 1)
-
-    if in_dir in srcdir:
-        s = LY_HEADER_LSR + add_tags (s, tags)
-    else:
-        s = LY_HEADER_NEW + s
-
-    s = mark_verbatim_section (s)
-    s = lsr_comment_re.sub ('', s)
-    open (dest, 'w').write (s)
-
-    e = os.system ("convert-ly -e '%s'" % dest)
-    if e:
-        unconverted.append (dest)
-    if os.path.exists (dest + '~'):
-        os.remove (dest + '~')
-    # -V seems to make unsafe snippets fail nicer/sooner
-    e = os.system ("lilypond -V -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" % dest)
-    if e:
-        unsafe.append (dest)
+    # Copy snippet 'name' from srcdir into DEST: insert texidoc
+    # translations, sanitize doctitles, prepend the matching
+    # auto-generation header (LSR or NEW) and lsrtags, then run convert-ly
+    # on the result.  LSR-originated snippets are additionally compiled
+    # with -dsafe; failures accumulate in the module-level lists.
+    global unsafe
+    global unconverted
+    dest = os.path.join (DEST, name)
+    tags = ', '.join (tags)
+    s = open (os.path.join (srcdir, name)).read ()
+
+    # TEXIDOCS is now a list of per-language translation directories.
+    for path in TEXIDOCS:
+        texidoc_translation_path = \
+            os.path.join (path, os.path.splitext (name)[0] + '.texidoc')
+        if os.path.exists (texidoc_translation_path):
+            texidoc_translation = open (texidoc_translation_path).read ()
+            # Since we want to insert the translations verbatim using a
+            # regexp, \\ is understood as ONE escaped backslash. So we have
+            # to escape those backslashes once more...
+            texidoc_translation = texidoc_translation.replace ('\\', '\\\\')
+            s = begin_header_re.sub ('\\g<0>\n' + texidoc_translation, s, 1)
+
+    s = doctitle_re.sub (doctitle_sub, s)
+    # in_dir may be '' when no LSR dir was given; guard the substring test
+    # ('' in srcdir would otherwise always be True).
+    if in_dir and in_dir in srcdir:
+        s = LY_HEADER_LSR + add_tags (s, tags)
+    else:
+        s = LY_HEADER_NEW + add_version (s) + s
+
+    s = mark_verbatim_section (s)
+    s = lsr_comment_re.sub ('', s)
+    s = strip_white_spaces_re.sub ('', s)
+    s = escape_backslashes_in_header (s)
+    open (dest, 'w').write (s)
+
+    e = os.system (convert_ly+(" -d -e '%s'" % dest))
+    if e:
+        unconverted.append (dest)
+    if os.path.exists (dest + '~'):
+        os.remove (dest + '~')
+    # no need to check snippets from input/new
+    if in_dir and in_dir in srcdir:
+        # -dsafe rejects snippets using unsafe Scheme; failures -> unsafe
+        e = os.system ("%s -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" %(lilypond_bin, dest))
+        if e:
+            unsafe.append (dest)
def read_source_with_dirs (src):
-    s = {}
-    l = {}
-    for tag in TAGS:
-        srcdir = os.path.join (src, tag)
-        l[tag] = set (map (os.path.basename, glob.glob (os.path.join (srcdir, '*.ly'))))
-        for f in l[tag]:
-            if f in s:
-                s[f][1].append (tag)
-            else:
-                s[f] = (srcdir, [tag])
-    return s, l
+    # Read the LSR tarball layout, where tags are one subdirectory each
+    # under 'src'.  Returns (s, l): s maps basename -> (srcdir, [tags]),
+    # l maps tag -> set of basenames carrying that tag.
+    s = {}
+    l = {}
+    for tag in TAGS:
+        srcdir = os.path.join (src, tag)
+        l[tag] = set (map (os.path.basename,
+                           glob.glob (os.path.join (srcdir, '*.ly'))))
+        for f in l[tag]:
+            if f in s:
+                s[f][1].append (tag)
+            else:
+                s[f] = (srcdir, [tag])
+    return s, l
tags_re = re.compile ('lsrtags\\s*=\\s*"(.+?)"')
def read_source (src):
-    s = {}
-    l = dict ([(tag, set()) for tag in TAGS])
-    for f in glob.glob (os.path.join (src, '*.ly')):
-        basename = os.path.basename (f)
-        m = tags_re.search (open (f, 'r').read ())
-        if m:
-            file_tags = [tag.strip() for tag in m.group (1). split(',')]
-            s[basename] = (src, file_tags)
-            [l[tag].add (basename) for tag in file_tags if tag in TAGS]
-        else:
-            notags_files.append (f)
-    return s, l
-
-
-def dump_file_list (file, list):
-    f = open (file, 'w')
-    f.write ('\n'.join (list) + '\n')
-
-## clean out existing lys and generated files
-map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
-    glob.glob (os.path.join (DEST, '*.snippet-list')))
-
-# read LSR source where tags are defined by subdirs
-snippets, tag_lists = read_source_with_dirs (in_dir)
-# read input/new where tags are directly
-s, l = read_source (NEW_LYS)
-snippets.update (s)
-for t in TAGS:
-    tag_lists[t].update (l[t])
+    # Read snippets from a flat directory (Documentation/snippets/new)
+    # where tags are given by the lsrtags header field.  Same return
+    # shape as read_source_with_dirs; files lacking an lsrtags field are
+    # recorded in notags_files.
+    # NOTE(review): notags_files is not defined in this chunk -- presumably
+    # initialized near the top of the file; verify.
+    s = {}
+    l = dict ([(tag, set()) for tag in TAGS])
+    for f in glob.glob (os.path.join (src, '*.ly')):
+        basename = os.path.basename (f)
+        m = tags_re.search (open (f, 'r').read ())
+        if m:
+            file_tags = [tag.strip() for tag in m.group (1). split(',')]
+            s[basename] = (src, file_tags)
+            [l[tag].add (basename) for tag in file_tags if tag in TAGS]
+        else:
+            notags_files.append (f)
+    return s, l
+
+
+def dump_file_list (file, file_list, update=False):
+    # Write the names in file_list to 'file', sorted, one per line.
+    # With update=True, merge with names already listed in the file so
+    # LSR-only entries survive an update-only (no in_dir) run.
+    # NOTE(review): update=True assumes 'file' already exists -- true
+    # after a full rebuild, but verify for fresh checkouts.
+    if update:
+        old_list = set (open (file, 'r').read ().splitlines ())
+        old_list.update (file_list)
+        new_list = list (old_list)
+    else:
+        new_list = file_list
+    f = open (file, 'w')
+    f.write ('\n'.join (sorted (new_list)) + '\n')
+
+def update_ly_in_place (snippet_path):
+    # Refresh the translated texidoc/doctitle chunks of an existing
+    # snippet in DEST from Documentation/<lang>/texidocs/, without
+    # re-importing the snippet itself from LSR.
+    visited_languages = []
+    contents = open (snippet_path).read ()
+    contents = texidoc_chunk_re.sub \
+        (lambda m: update_translated_texidoc (m,
+                                              snippet_path,
+                                              visited_languages),
+         contents)
+    need_line_break_workaround = False
+    for language_code in langdefs.LANGDICT:
+        if not language_code in visited_languages:
+            # translation exists on disk but not yet in the snippet:
+            # insert it right after \header {
+            base = os.path.splitext (os.path.basename (snippet_path))[0]
+            texidoc_path = os.path.join ('Documentation', language_code,
+                                         'texidocs', base + '.texidoc')
+            if os.path.isfile (texidoc_path):
+                texidoc_translation = open (texidoc_path).read ()
+                texidoc_translation = texidoc_translation.replace ('\\', '\\\\')
+                contents = begin_header_re.sub ('\\g<0>\n' + texidoc_translation, contents, 1)
+            else:
+                need_line_break_workaround = True
+    contents = doctitle_re.sub (doctitle_sub, contents)
+    contents = escape_backslashes_in_header (contents)
+
+    # workaround for a bug in the regex's that I'm not smart
+    # enough to figure out. -gp
+    # NOTE(review): if no '%% Translation of' is present, find () returns
+    # -1 and the slicing below misbehaves silently -- verify callers.
+    if need_line_break_workaround:
+        first_translated = contents.find('%% Translation of')
+        keep = contents[:first_translated+5]
+        contents = keep + contents[first_translated+5:].replace('%% Translation of', '\n%% Translation of')
+
+    open (snippet_path, 'w').write (contents)
+
+# Two modes: with an LSR dir, rebuild DEST completely from LSR + NEW_LYS;
+# without one, regenerate only NEW_LYS snippets and refresh the translated
+# texidocs of the remaining snippets in place.
+if in_dir:
+    ## clean out existing lys and generated files
+    map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
+         glob.glob (os.path.join (DEST, '*.snippet-list')))
+
+    # read LSR source where tags are defined by subdirs
+    snippets, tag_lists = read_source_with_dirs (in_dir)
+
+    # read input/new where tags are directly defined
+    s, l = read_source (NEW_LYS)
+    snippets.update (s)
+    for t in TAGS:
+        tag_lists[t].update (l[t])
+else:
+    snippets, tag_lists = read_source (NEW_LYS)
+    ## update texidocs of snippets that don't come from NEW_LYS
+    for snippet_path in glob.glob (os.path.join (DEST, '*.ly')):
+        if not os.path.basename (snippet_path) in snippets:
+            update_ly_in_place (snippet_path)
for (name, (srcdir, tags)) in snippets.items ():
-    copy_ly (srcdir, name, tags)
-
+    copy_ly (srcdir, name, tags)
for (tag, file_set) in tag_lists.items ():
-    dump_file_list (os.path.join (DEST, tag + '.snippet-list'), sorted(file_set))
-
+    # in update-only mode (no in_dir) merge with the existing lists
+    # instead of overwriting them
+    dump_file_list (os.path.join (DEST, tag + '.snippet-list'),
+                    file_set, update=not(in_dir))
if unconverted:
-    sys.stderr.write ('These files could not be converted successfully by convert-ly:\n')
-    sys.stderr.write ('\n'.join (unconverted) + '\n\n')
-
+    sys.stderr.write ('These files could not be converted successfully by convert-ly:\n')
+    sys.stderr.write ('\n'.join (unconverted) + '\n\n')
if notags_files:
-    sys.stderr.write ('No tags could be found in these files:\n')
-    sys.stderr.write ('\n'.join (notags_files) + '\n\n')
-
-dump_file_list ('lsr-unsafe.txt', unsafe)
-sys.stderr.write ('''
+    sys.stderr.write ('No tags could be found in these files:\n')
+    sys.stderr.write ('\n'.join (notags_files) + '\n\n')
+# lsr-unsafe.txt is now only written when some snippet failed the -dsafe run
+if unsafe:
+    dump_file_list ('lsr-unsafe.txt', unsafe)
+    sys.stderr.write ('''
Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY!
- git add input/lsr/*.ly
- xargs git-diff HEAD < lsr-unsafe.txt
-
-''')
+ git add %s/*.ly
+ xargs git diff HEAD < lsr-unsafe.txt
+''' % DEST)