X-Git-Url: https://git.donarmstrong.com/?a=blobdiff_plain;f=scripts%2Fauxiliar%2Fmakelsr.py;h=a03466310b36cf47d7e91f12c7e6f79422a680d3;hb=ebe492ca408fb0d9abf80b94c56197eef8dc2f09;hp=80628cbf164e82a629227a9ea10bbdcd37cc6433;hpb=7302e6b7fe9bf6bc57b8c942991de1abe36d6beb;p=lilypond.git

diff --git a/scripts/auxiliar/makelsr.py b/scripts/auxiliar/makelsr.py
index 80628cbf16..a03466310b 100755
--- a/scripts/auxiliar/makelsr.py
+++ b/scripts/auxiliar/makelsr.py
@@ -5,23 +5,37 @@
 import os
 import glob
 import re
+sys.path.append ('python')
+import langdefs
+
+DEST = os.path.join ('Documentation', 'snippets')
+NEW_LYS = os.path.join ('Documentation', 'snippets', 'new')
+TEXIDOCS = [os.path.join ('Documentation', language_code, 'texidocs')
+            for language_code in langdefs.LANGDICT]
+
 USAGE = '''
 Usage: makelsr.py [LSR_SNIPPETS_DIR]
 
 This script must be run from top of the source tree;
-it updates snippets input/lsr with snippets in input/new or LSR_SNIPPETS_DIR.
-If a snippet is present in both directories, the one from input/new is preferred.
-'''
-
-LY_HEADER_LSR = '''%% Do not edit this file; it is auto-generated from LSR http://lsr.dsi.unimi.it
+it updates snippets %(DEST)s with snippets
+from %(NEW_LYS)s or LSR_SNIPPETS_DIR.
+If a snippet is present in both directories, the one
+from %(NEW_LYS)s is preferred.
+''' % vars ()
+
+LY_HEADER_LSR = '''%% DO NOT EDIT this file manually; it is automatically
+%% generated from LSR http://lsr.dsi.unimi.it
+%% Make any changes in LSR itself, or in Documentation/snippets/new/ ,
+%% and then run scripts/auxiliar/makelsr.py
+%%
 %% This file is in the public domain.
 '''
 
-LY_HEADER_NEW = '''%% Do not edit this file; it is auto-generated from input/new
+LY_HEADER_NEW = '''%% DO NOT EDIT this file manually; it is automatically
+%% generated from %s
+%% Make any changes in Documentation/snippets/new/
+%% and then run scripts/auxiliar/makelsr.py
+%%
 %% This file is in the public domain.
-'''
-
-DEST = os.path.join ('input', 'lsr')
-NEW_LYS = os.path.join ('input', 'new')
-TEXIDOCS = os.path.join ('input', 'texidocs')
+''' % NEW_LYS
 
 TAGS = []
 # NR 1
@@ -51,6 +65,18 @@ if len (sys.argv) >= 2:
 else:
     in_dir = ''
 
+# which convert-ly to use
+if os.path.isfile("out/bin/convert-ly"):
+    conv_path='out/bin/'
+elif os.path.isfile("build/out/bin/convert-ly"):
+    conv_path='build/out/bin/'
+else:
+    conv_path=''
+convert_ly=conv_path+'convert-ly'
+lilypond_bin=conv_path+'lilypond'
+
+print 'using '+convert_ly
+
 unsafe = []
 unconverted = []
 notags_files = []
@@ -58,23 +84,64 @@ notags_files = []
 
 # mark the section that will be printed verbatim by lilypond-book
 end_header_re = re.compile ('(\\header {.+?doctitle = ".+?})\n', re.M | re.S)
 
+doctitle_re = re.compile (r'(doctitle[a-zA-Z_]{0,6}\s*=\s*")((?:\\"|[^"\n])*)"')
+texinfo_q_re = re.compile (r'@q{(.*?)}')
+texinfo_qq_re = re.compile (r'@qq{(.*?)}')
+def doctitle_sub (title_match):
+    # Comma forbidden in Texinfo node name
+    title = title_match.group (2).replace (',', '')
+    title = texinfo_q_re.sub (r"`\1'", title)
+    title = texinfo_qq_re.sub (r'\"\1\"', title)
+    return title_match.group (1) + title + '"'
+
 def mark_verbatim_section (ly_code):
     return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1)
 
 # '% LSR' comments are to be stripped
 lsr_comment_re = re.compile (r'\s*%+\s*LSR.*')
-
 begin_header_re = re.compile (r'\\header\s*{', re.M)
-
 ly_new_version_re = re.compile (r'\\version\s*"(.+?)"')
+strip_white_spaces_re = re.compile (r'[ \t]+(?=\n)')
 
 # add tags to ly files from LSR
 def add_tags (ly_code, tags):
-    return begin_header_re.sub ('\\g<0>\n  lsrtags = "' + tags + '"\n', ly_code, 1)
+    return begin_header_re.sub ('\\g<0>\n  lsrtags = "' + tags + '"\n',
+                                ly_code, 1)
 
 # for snippets from input/new, add message for earliest working version
 def add_version (ly_code):
-    return '''%% Note: this file works from version ''' + ly_new_version_re.search (ly_code).group (1) + '\n'
+    return '''%% Note: this file works from version ''' + \
+        ly_new_version_re.search (ly_code).group (1) + '\n'
+
+s = 'Translation of GIT [Cc]ommittish'
+texidoc_chunk_re = re.compile (r'^(?:%+\s*' + s + \
+    r'.+)?\s*(?:texidoc|doctitle)([a-zA-Z]{2,4})\s+=(?:.|\n)*?(?=%+\s*' + \
+    s + r'|\n\} % begin verbatim|\n (?:doctitle|texidoc|lsrtags) |$(?!.|\n))', re.M)
+
+def update_translated_texidoc (m, snippet_path, visited_languages):
+    base = os.path.splitext (os.path.basename (snippet_path))[0]
+    language_code = m.group (1)
+    visited_languages.append (language_code)
+    texidoc_path = os.path.join ('Documentation', language_code,
+                                 'texidocs', base + '.texidoc')
+    if os.path.isfile (texidoc_path):
+        return open (texidoc_path).read ()
+    else:
+        return m.group (0)
+
+def escape_backslashes_in_header(snippet):
+    # ASSUME: the \header exists.
+    header_char_number_start = snippet.find('\header {')
+    header_char_number_end = snippet.find('} % begin verbatim')
+
+    header = snippet[header_char_number_start:header_char_number_end]
+    # two levels of escaping happening here -- 4\ means 1\
+    # and the 10\ means two \ backslashes (that's 8\ ), and
+    # one backreference to group 1 (that's the remaining 2\ ).
+    new_header = re.sub("@code\{\\\\([a-zA-Z])", "@code{\\\\\\\\\\1", header)
+    escaped_snippet = (snippet[:header_char_number_start] +
+        new_header + snippet[header_char_number_end:])
+    return escaped_snippet
 
 def copy_ly (srcdir, name, tags):
     global unsafe
@@ -83,16 +150,18 @@ def copy_ly (srcdir, name, tags):
     tags = ', '.join (tags)
     s = open (os.path.join (srcdir, name)).read ()
 
-    texidoc_translations_path = os.path.join (TEXIDOCS,
-        os.path.splitext (name)[0] + '.texidoc')
-    if os.path.exists (texidoc_translations_path):
-        texidoc_translations = open (texidoc_translations_path).read ()
-        # Since we want to insert the translations verbatim using a
-        # regexp, \\ is understood as ONE escaped backslash. So we have
-        # to escape those backslashes once more...
-        texidoc_translations = texidoc_translations.replace ('\\', '\\\\')
-        s = begin_header_re.sub ('\\g<0>\n' + texidoc_translations, s, 1)
-
+    for path in TEXIDOCS:
+        texidoc_translation_path = \
+            os.path.join (path, os.path.splitext (name)[0] + '.texidoc')
+        if os.path.exists (texidoc_translation_path):
+            texidoc_translation = open (texidoc_translation_path).read ()
+            # Since we want to insert the translations verbatim using a
+            # regexp, \\ is understood as ONE escaped backslash. So we have
+            # to escape those backslashes once more...
+            texidoc_translation = texidoc_translation.replace ('\\', '\\\\')
+            s = begin_header_re.sub ('\\g<0>\n' + texidoc_translation, s, 1)
+
+    s = doctitle_re.sub (doctitle_sub, s)
     if in_dir and in_dir in srcdir:
         s = LY_HEADER_LSR + add_tags (s, tags)
     else:
@@ -100,17 +169,18 @@ def copy_ly (srcdir, name, tags):
 
     s = mark_verbatim_section (s)
     s = lsr_comment_re.sub ('', s)
+    s = strip_white_spaces_re.sub ('', s)
+    s = escape_backslashes_in_header (s)
     open (dest, 'w').write (s)
 
-    e = os.system ("convert-ly -e '%s'" % dest)
+    e = os.system (convert_ly+(" -d -e '%s'" % dest))
     if e:
         unconverted.append (dest)
     if os.path.exists (dest + '~'):
         os.remove (dest + '~')
     # no need to check snippets from input/new
     if in_dir and in_dir in srcdir:
-        # -V seems to make unsafe snippets fail nicer/sooner
-        e = os.system ("lilypond -V -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" % dest)
+        e = os.system ("%s -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" %(lilypond_bin, dest))
         if e:
             unsafe.append (dest)
 
@@ -156,6 +226,38 @@ def dump_file_list (file, file_list, update=False):
     f = open (file, 'w')
     f.write ('\n'.join (sorted (new_list)) + '\n')
 
+def update_ly_in_place (snippet_path):
+    visited_languages = []
+    contents = open (snippet_path).read ()
+    contents = texidoc_chunk_re.sub \
+        (lambda m: update_translated_texidoc (m,
+                                              snippet_path,
+                                              visited_languages),
+         contents)
+    need_line_break_workaround = False
+    for language_code in langdefs.LANGDICT:
+        if not language_code in visited_languages:
+            base = os.path.splitext (os.path.basename (snippet_path))[0]
+            texidoc_path = os.path.join ('Documentation', language_code,
+                                         'texidocs', base + '.texidoc')
+            if os.path.isfile (texidoc_path):
+                texidoc_translation = open (texidoc_path).read ()
+                texidoc_translation = texidoc_translation.replace ('\\', '\\\\')
+                contents = begin_header_re.sub ('\\g<0>\n' + texidoc_translation, contents, 1)
+            else:
+                need_line_break_workaround = True
+    contents = doctitle_re.sub (doctitle_sub, contents)
+    contents = escape_backslashes_in_header (contents)
+
+    # workaround for a bug in the regexes that I'm not smart
+    # enough to figure out. -gp
+    if need_line_break_workaround:
+        first_translated = contents.find('%% Translation of')
+        keep = contents[:first_translated+5]
+        contents = keep + contents[first_translated+5:].replace('%% Translation of', '\n%% Translation of')
+
+    open (snippet_path, 'w').write (contents)
+
 if in_dir:
     ## clean out existing lys and generated files
     map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
@@ -171,6 +273,10 @@ if in_dir:
             tag_lists[t].update (l[t])
 else:
     snippets, tag_lists = read_source (NEW_LYS)
+    ## update texidocs of snippets that don't come from NEW_LYS
+    for snippet_path in glob.glob (os.path.join (DEST, '*.ly')):
+        if not os.path.basename (snippet_path) in snippets:
+            update_ly_in_place (snippet_path)
 
 for (name, (srcdir, tags)) in snippets.items ():
     copy_ly (srcdir, name, tags)
@@ -188,7 +294,7 @@ if unsafe:
     sys.stderr.write ('''
 
 Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY!
-    git add input/lsr/*.ly
+    git add %s/*.ly
     xargs git diff HEAD < lsr-unsafe.txt
 
-''')
+''' % DEST)
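
For illustration only, not part of the patch: a minimal standalone sketch of what the
@code{...} escaping added in escape_backslashes_in_header does to a snippet header.
The re.sub pattern and replacement are copied from the patch above; the sample header
string is invented, and the code is Python 2 like the script itself.

import re

# Hypothetical snippet header, before escaping.
header = r'''\header {
  texidoc = "Use @code{\override} to tweak a grob property."
} % begin verbatim'''

# Same call as in escape_backslashes_in_header: the pattern's 4 source
# backslashes match ONE literal backslash after @code{, and the replacement
# writes it back doubled, followed by the captured letter.
new_header = re.sub("@code\{\\\\([a-zA-Z])", "@code{\\\\\\\\\\1", header)

print new_header
# The texidoc line now reads:
#   texidoc = "Use @code{\\override} to tweak a grob property."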
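
In the same spirit, a small self-contained demo of the doctitle normalisation
(doctitle_re / doctitle_sub in the patch): the regexps and the function are restated
verbatim from the patch; the sample doctitle line is invented.

import re

doctitle_re = re.compile (r'(doctitle[a-zA-Z_]{0,6}\s*=\s*")((?:\\"|[^"\n])*)"')
texinfo_q_re = re.compile (r'@q{(.*?)}')
texinfo_qq_re = re.compile (r'@qq{(.*?)}')

def doctitle_sub (title_match):
    # Commas are forbidden in Texinfo node names; @q{}/@qq{} markup
    # becomes plain (escaped) quotes.
    title = title_match.group (2).replace (',', '')
    title = texinfo_q_re.sub (r"`\1'", title)
    title = texinfo_qq_re.sub (r'\"\1\"', title)
    return title_match.group (1) + title + '"'

line = 'doctitle = "Adding @q{hairpins}, @qq{slurs} and ties"'
print doctitle_re.sub (doctitle_sub, line)
# -> doctitle = "Adding `hairpins' \"slurs\" and ties"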