X-Git-Url: https://git.donarmstrong.com/?a=blobdiff_plain;ds=sidebyside;f=scripts%2Fauxiliar%2Fmakelsr.py;h=ca7db5c5b2ca900fe047e6fc21bd91433bbcfa05;hb=c1cc117bb917b1fd2f91e8d978f3674583e99528;hp=30f6fdc5ba7939691dc543b7eadde1616835b669;hpb=c89f60a26898bf31629a6f7969e13fa837612270;p=lilypond.git diff --git a/scripts/auxiliar/makelsr.py b/scripts/auxiliar/makelsr.py index 30f6fdc5ba..ca7db5c5b2 100755 --- a/scripts/auxiliar/makelsr.py +++ b/scripts/auxiliar/makelsr.py @@ -66,14 +66,22 @@ notags_files = [] # mark the section that will be printed verbatim by lilypond-book end_header_re = re.compile ('(\\header {.+?doctitle = ".+?})\n', re.M | re.S) +doctitle_re = re.compile (r'(doctitle[a-zA-Z_]{0,6}\s*=\s*")((?:\\"|[^"\n])*)"') +texinfo_q_re = re.compile (r'@q{(.*?)}') +texinfo_qq_re = re.compile (r'@qq{(.*?)}') +def doctitle_sub (title_match): + # Comma forbidden in Texinfo node name + title = title_match.group (2).replace (',', '') + title = texinfo_q_re.sub (r"`\1'", title) + title = texinfo_qq_re.sub (r'\"\1\"', title) + return title_match.group (1) + title + '"' + def mark_verbatim_section (ly_code): return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1) # '% LSR' comments are to be stripped lsr_comment_re = re.compile (r'\s*%+\s*LSR.*') - begin_header_re = re.compile (r'\\header\s*{', re.M) - ly_new_version_re = re.compile (r'\\version\s*"(.+?)"') # add tags to ly files from LSR @@ -86,6 +94,22 @@ def add_version (ly_code): return '''%% Note: this file works from version ''' + \ ly_new_version_re.search (ly_code).group (1) + '\n' +s = 'Translation of GIT [Cc]ommittish' +texidoc_chunk_re = re.compile (r'^(?:%+\s*' + s + \ + r'.+)?\s*(?:texidoc|doctitle)([a-zA-Z]{2,4})\s+=(?:.|\n)*?(?=%+\s*' + \ + s + r'|\n\} % begin verbatim|\n (?:doctitle|texidoc|lsrtags) |$(?!.|\n))', re.M) + +def update_translated_texidoc (m, snippet_path, visited_languages): + base = os.path.splitext (os.path.basename (snippet_path))[0] + language_code = m.group (1) + 
visited_languages.append (language_code)
+    texidoc_path = os.path.join ('Documentation', language_code,
+                                 'texidocs', base + '.texidoc')
+    if os.path.isfile (texidoc_path):
+        return open (texidoc_path).read ()
+    else:
+        return m.group (0)
+
 def copy_ly (srcdir, name, tags):
     global unsafe
     global unconverted
@@ -104,6 +128,7 @@ def copy_ly (srcdir, name, tags):
         texidoc_translation = texidoc_translation.replace ('\\', '\\\\')
         s = begin_header_re.sub ('\\g<0>\n' + texidoc_translation, s, 1)
+    s = doctitle_re.sub (doctitle_sub, s)
     if in_dir and in_dir in srcdir:
         s = LY_HEADER_LSR + add_tags (s, tags)
     else:
@@ -167,6 +192,26 @@ def dump_file_list (file, file_list, update=False):
     f = open (file, 'w')
     f.write ('\n'.join (sorted (new_list)) + '\n')

+def update_ly_in_place (snippet_path):
+    visited_languages = []
+    contents = open (snippet_path).read ()
+    contents = texidoc_chunk_re.sub \
+        (lambda m: update_translated_texidoc (m,
+                                              snippet_path,
+                                              visited_languages),
+         contents)
+    for language_code in langdefs.LANGDICT:
+        if not language_code in visited_languages:
+            base = os.path.splitext (os.path.basename (snippet_path))[0]
+            texidoc_path = os.path.join ('Documentation', language_code,
+                                         'texidocs', base + '.texidoc')
+            if os.path.isfile (texidoc_path):
+                texidoc_translation = open (texidoc_path).read ()
+                texidoc_translation = texidoc_translation.replace ('\\', '\\\\')
+                contents = begin_header_re.sub ('\\g<0>\n' + texidoc_translation, contents, 1)
+    contents = doctitle_re.sub (doctitle_sub, contents)
+    open (snippet_path, 'w').write (contents)
+
 if in_dir:
     ## clean out existing lys and generated files
     map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
@@ -182,6 +227,10 @@ if in_dir:
             tag_lists[t].update (l[t])
 else:
     snippets, tag_lists = read_source (NEW_LYS)
+    ## update texidocs of snippets that don't come from NEW_LYS
+    for snippet_path in glob.glob (os.path.join (DEST, '*.ly')):
+        if not os.path.basename (snippet_path) in snippets:
+            update_ly_in_place (snippet_path)

 for (name, (srcdir, tags)) in snippets.items ():
     copy_ly (srcdir, name, tags)