From: John Mandereau Date: Sat, 19 Apr 2008 23:04:43 +0000 (+0200) Subject: Make ly snippets variable names and comments translatable X-Git-Tag: release/2.11.45-1~39 X-Git-Url: https://git.donarmstrong.com/?a=commitdiff_plain;h=1eeec67c6d68aa6f64115d69ae1786f438aa8676;p=lilypond.git Make ly snippets variable names and comments translatable Also clean up texi-langutils.py --- diff --git a/Documentation/GNUmakefile b/Documentation/GNUmakefile index 1524efc58b..610e9130b2 100644 --- a/Documentation/GNUmakefile +++ b/Documentation/GNUmakefile @@ -43,7 +43,7 @@ new-lang: cp fr/GNUmakefile $(ISOLANG) cp fr/user/GNUmakefile $(ISOLANG)/user sed -i -e 's/ISOLANG *= *fr/ISOLANG = $(ISOLANG)/' $(ISOLANG)/GNUmakefile $(ISOLANG)/user/GNUmakefile - $(PYTHON) $(buildscript-dir)/texi-langutils.py -d $(outdir) -b "UNTRANSLATED NODE: IGNORE ME" -l $(ISOLANG) -o doc.pot --skeleton --gettext ../user/lilypond-learning.tely + $(PYTHON) $(buildscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) -o doc.pot --skeleton --gettext ../user/lilypond-learning.tely mv $(outdir)/*.*tely $(ISOLANG)/user msgmerge -U po/lilypond-doc.pot $(outdir)/doc.pot cp po/lilypond-doc.pot po/$(ISOLANG).po @@ -53,7 +53,7 @@ CHECKED_FILES = $(ISOLANG)/index.html.in $(shell find $(ISOLANG)/user/ -maxdepth TELY_FILES = $(call src-wildcard,$(ISOLANG)/user/*.tely) skeleton-update: - $(PYTHON) $(buildscript-dir)/texi-langutils.py -d $(outdir) -b "UNTRANSLATED NODE: IGNORE ME" -l $(ISOLANG) --skeleton $(TELY_FILES:$(ISOLANG)/user/%.tely=../user/%.tely) + $(PYTHON) $(buildscript-dir)/texi-langutils.py -d $(outdir) -l $(ISOLANG) --skeleton $(TELY_FILES:$(ISOLANG)/user/%.tely=../user/%.tely) $(PYTHON) $(buildscript-dir)/texi-skeleton-update.py $(ISOLANG)/user $(outdir) snippet-update: diff --git a/Documentation/po/GNUmakefile b/Documentation/po/GNUmakefile index 4542740e1d..9eb5215289 100644 --- a/Documentation/po/GNUmakefile +++ b/Documentation/po/GNUmakefile @@ -1,6 +1,7 @@ depth = ../.. 
STEPMAKE_TEMPLATES=podir +LOCALSTEPMAKE_TEMPLATES=lilypond include $(depth)/make/stepmake.make diff --git a/buildscripts/texi-langutils.py b/buildscripts/texi-langutils.py index a2d8958f56..33f130d158 100644 --- a/buildscripts/texi-langutils.py +++ b/buildscripts/texi-langutils.py @@ -8,6 +8,8 @@ import re import getopt import os +import langdefs + def read_pipe (command): print command pipe = os.popen (command) @@ -24,7 +26,10 @@ make_gettext = ('--gettext', '') in optlist # --gettext generate a node lis make_skeleton = ('--skeleton', '') in optlist # --skeleton extract the node tree from a Texinfo source output_file = 'doc.pot' -node_blurb = '' +node_blurb = '''@ifhtml +UNTRANSLATED NODE: IGNORE ME +@end ifhtml +''' doclang = '' head_committish = read_pipe ('git-rev-parse HEAD') intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*- @@ -37,94 +42,115 @@ intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*- ''' end_blurb = """ --- SKELETON FILE -- -When you actually translate this file, please remove these lines as -well as all `UNTRANSLATED NODE: IGNORE ME' lines. 
+@c -- SKELETON FILE -- """ for x in optlist: - if x[0] == '-o': # -o NAME set PO output file name to NAME - output_file = x[1] - elif x[0] == '-d': # -d DIR set working directory to DIR - os.chdir (x[1]) - elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB - node_blurb = x[1] - elif x[0] == '-i': # -i BLURB set blurb written at beginning of each file to BLURB - intro_blurb = x[1] - elif x[0] == '-l': # -l ISOLANG set documentlanguage to ISOLANG - doclang = '; documentlanguage: ' + x[1] + if x[0] == '-o': # -o NAME set PO output file name to NAME + output_file = x[1] + elif x[0] == '-d': # -d DIR set working directory to DIR + os.chdir (x[1]) + elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB + node_blurb = x[1] + elif x[0] == '-i': # -i BLURB set blurb written at beginning of each file to BLURB + intro_blurb = x[1] + elif x[0] == '-l': # -l ISOLANG set documentlanguage to ISOLANG + doclang = '; documentlanguage: ' + x[1] + +texinfo_with_menus_re = re.compile (r"^(\*) +([^:\n]+)::.*?$|^@(include|menu|end menu|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.+?)$|@(rglos){(.+?)}", re.M) + +texinfo_re = re.compile (r"^@(include|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.+?)$|@(rglos){(.+?)}", re.M) +ly_string_re = re.compile (r'^([a-zA-Z]+)[\t ]*=|%+[\t ]*(.*)$') +verbatim_ly_re = re.compile (r'@lilypond\[.*?verbatim') def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile, output_file=None): - try: - f = open (texifilename, 'r') - texifile = f.read () - f.close () - includes = [] - if write_skeleton: - g = open (os.path.basename (texifilename), 'w') - subst = globals () - subst.update (locals ()) - g.write (i_blurb % subst) - tutu = re.findall (r"""^(\*) +([^: - ]+)::[^ - ]*?$|^@(include|menu|end 
menu|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *([^ - ]*)[^ - ]*?$|@(rglos){(.+?)}""", texifile, re.M) - node_trigger = False - for item in tutu: - if item[0] == '*': - g.write ('* ' + item[1] + '::\n') - elif output_file and item[4] == 'rglos': - output_file.write ('_(r"' + item[5] + '") # @rglos in ' + texifilename + '\n') - else: - g.write ('@' + item[2] + ' ' + item[3] + '\n') - if node_trigger: - g.write (n_blurb) - node_trigger = False - if not item[2] in ('include', 'menu', 'end menu'): - if output_file: - output_file.write ('_(r"' + item[3].strip () + '") # @' + item[2] + \ - ' in ' + texifilename + '\n') - if item[2] == 'node': - node_trigger = True - elif item[2] == 'include': - includes.append(item[3]) - g.write (end_blurb) - g.close () - elif output_file: - toto = re.findall (r"""^@(include|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *([^ - ]*)[^ - ]*?$|@(rglos){(.+?)}""", texifile, re.M) - for item in toto: - if item[0] == 'include': - includes.append(item[1]) - elif item[2] == 'rglos': - output_file.write ('# @rglos in ' + texifilename + '\n_(r"' + item[3] + '")\n') - else: - output_file.write ('# @' + item[0] + ' in ' + texifilename + '\n_(r"' + item[1].strip () + '")\n') - if process_includes: - dir = os.path.dirname (texifilename) - for item in includes: - process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb, write_skeleton, topfile, output_file) - except IOError, (errno, strerror): - print "I/O error(%s): %s: %s" % (errno, texifilename, strerror) + try: + f = open (texifilename, 'r') + texifile = f.read () + f.close () + printedfilename = texifilename.replace ('../','') + includes = [] + + # process ly var names and comments + if output_file: + lines = texifile.splitlines () + i = 0 + in_verb_ly_block = False + for i in range (len (lines)): + if verbatim_ly_re.match (lines[i]): + 
in_verb_ly_block = True + elif lines[i].startswith ('@end lilypond'): + in_verb_ly_block = False + elif in_verb_ly_block: + for (var, comment) in ly_string_re.findall (lines[i]): + if var: + output_file.write ('# ' + printedfilename + ':' + \ + str (i + 1) + ' (variable)\n_(r"' + var + '")\n') + elif comment: + output_file.write ('# ' + printedfilename + ':' + \ + str (i + 1) + ' (comment)\n_(r"' + comment + '")\n') + + # process Texinfo node names and section titles + if write_skeleton: + g = open (os.path.basename (texifilename), 'w') + subst = globals () + subst.update (locals ()) + g.write (i_blurb % subst) + tutu = texinfo_with_menus_re.findall (texifile) + node_trigger = False + for item in tutu: + if item[0] == '*': + g.write ('* ' + item[1] + '::\n') + elif output_file and item[4] == 'rglos': + output_file.write ('_(r"' + item[5] + '") # @rglos in ' + printedfilename + '\n') + else: + g.write ('@' + item[2] + ' ' + item[3] + '\n') + if node_trigger: + g.write (n_blurb) + node_trigger = False + if not item[2] in ('include', 'menu', 'end menu'): + if output_file: + output_file.write ('# @' + item[2] + ' in ' + \ + printedfilename + '\n_(r"' + item[3].strip () + '")\n') + if item[2] == 'node': + node_trigger = True + elif item[2] == 'include': + includes.append(item[3]) + g.write (end_blurb) + g.close () + + elif output_file: + toto = texinfo_re.findall (texifile) + for item in toto: + if item[0] == 'include': + includes.append(item[1]) + elif item[2] == 'rglos': + output_file.write ('# @rglos in ' + printedfilename + '\n_(r"' + item[3] + '")\n') + else: + output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip () + '")\n') + if process_includes: + dir = os.path.dirname (texifilename) + for item in includes: + process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb, write_skeleton, topfile, output_file) + except IOError, (errno, strerror): + print "I/O error(%s): %s: %s" % (errno, texifilename, strerror) if 
intro_blurb != '': - intro_blurb += '\n\n' + intro_blurb += '\n\n' if node_blurb != '': - node_blurb = '\n' + node_blurb + '\n\n' + node_blurb = '\n' + node_blurb + '\n\n' if make_gettext: - node_list_filename = 'node_list' - node_list = open (node_list_filename, 'w') - node_list.write ('# -*- coding: utf-8 -*-\n') - for texi_file in texi_files: - process_texi (texi_file, intro_blurb, node_blurb, make_skeleton, os.path.basename (texi_file), node_list) - for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'): - node_list.write ('_(r"' + word + '")\n') - node_list.close () - os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename) + node_list_filename = 'node_list' + node_list = open (node_list_filename, 'w') + node_list.write ('# -*- coding: utf-8 -*-\n') + for texi_file in texi_files: + process_texi (texi_file, intro_blurb, node_blurb, make_skeleton, os.path.basename (texi_file), node_list) + for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'): + node_list.write ('_(r"' + word + '")\n') + node_list.close () + os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename) else: - for texi_file in texi_files: - process_texi (texi_file, intro_blurb, node_blurb, make_skeleton, os.path.basename (texi_file)) + for texi_file in texi_files: + process_texi (texi_file, intro_blurb, node_blurb, make_skeleton, os.path.basename (texi_file)) diff --git a/make/doclang-rules.make b/make/doclang-rules.make index 6c4dbc532d..351b9db2ce 100644 --- a/make/doclang-rules.make +++ b/make/doclang-rules.make @@ -16,6 +16,10 @@ $(outdir)/version.%: $(top-src-dir)/VERSION echo $(TOPLEVEL_VERSION)>> $@ echo '@end macro'>> $@ +# This makes sure lilypond-doc gettext domain has been compiled +# before lilypond-book runs +%.tely: doc-po + $(OUT_TEXI_FILES): $(ITELY_FILES) $(ITEXI_FILES) $(DEEP_HTML_FILES) $(PDF_FILES): $(ITELY_FILES) $(ITEXI_FILES) diff 
--git a/scripts/lilypond-book.py b/scripts/lilypond-book.py index 7be0bc78b6..81cb784550 100644 --- a/scripts/lilypond-book.py +++ b/scripts/lilypond-book.py @@ -42,6 +42,7 @@ import tempfile import lilylib as ly import fontextract +import langdefs global _;_=ly._ @@ -829,6 +830,27 @@ def verbatim_html (s): re.sub ('<', '&lt;', re.sub ('&', '&amp;', s))) +ly_var_def_re = re.compile (r'^([a-zA-Z]+)[\t ]*=', re.M) +ly_comment_re = re.compile (r'(%+[\t ]*)(.*)$', re.M) + +def ly_comment_gettext (t, m): + return m.group (1) + t (m.group (2)) + +def verb_ly_gettext (s): + if not document_language: + return s + try: + t = langdefs.translation[document_language] + except: + return s + + s = ly_comment_re.sub (lambda m: ly_comment_gettext (t, m), s) + + for v in ly_var_def_re.findall (s): + s = re.sub (r"(?m)(^|[' \\#])%s([^a-zA-Z])" % v, + "\\1" + t (v) + "\\2", + s) + return s texinfo_lang_re = re.compile ('(?m)^@documentlanguage (.*?)( |$)') def set_default_options (source, default_ly_options, format): @@ -920,7 +942,7 @@ class LilypondSnippet (Snippet): self.do_options (os, self.type) def verb_ly (self): - return self.substring ('code') + return verb_ly_gettext (self.substring ('code')) def ly (self): contents = self.substring ('code') @@ -1372,7 +1394,7 @@ class LilypondFileSnippet (LilypondSnippet): s = self.contents s = re_begin_verbatim.split (s)[-1] s = re_end_verbatim.split (s)[0] - return s + return verb_ly_gettext (s) def ly (self): name = self.substring ('filename')