return output
-optlist, texi_files = getopt.getopt(sys.argv[1:],'no:d:b:i:l:',['skeleton', 'gettext'])
+optlist, texi_files = getopt.getopt(sys.argv[1:],'no:d:b:i:l:',['skeleton', 'gettext', 'head-only'])
process_includes = not ('-n', '') in optlist # -n don't process @include's in texinfo files
make_gettext = ('--gettext', '') in optlist # --gettext generate a node list from a Texinfo source
make_skeleton = ('--skeleton', '') in optlist # --skeleton extract the node tree from a Texinfo source
+head_only = ('--head-only', '') in optlist # --head-only only write first node in included Texinfo skeletons
-output_file = 'doc.pot'
+output_name = 'doc.pot'
# @untranslated should be defined as a macro in Texinfo source
node_blurb = '''@untranslated
'''
doclang = ''
-head_committish = read_pipe ('git-rev-parse HEAD')
-intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*-
+head_committish = read_pipe ('git rev-parse HEAD')
+intro_blurb = '''\\input texinfo @c -*- coding: utf-8; mode: texinfo%(doclang)s -*-
@c This file is part of %(topfile)s
@ignore
Translation of GIT committish: %(head_committish)s
for x in optlist:
if x[0] == '-o': # -o NAME set PO output file name to NAME
- output_file = x[1]
+ output_name = x[1]
elif x[0] == '-d': # -d DIR set working directory to DIR
+ print 'FIXME: this is evil. use cd DIR && texi-langutils ...'
+ # even better, add a sane -o option
os.chdir (x[1])
elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB
node_blurb = x[1]
lsr_verbatim_ly_re = re.compile (r'% begin verbatim$')
texinfo_verbatim_ly_re = re.compile (r'^@lilypond\[.*?verbatim')
-def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile, output_file=None, scan_ly=False):
+def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile,
+ output_file=None, scan_ly=False, inclusion_level=0):
try:
f = open (texifilename, 'r')
texifile = f.read ()
subst.update (locals ())
g.write (i_blurb % subst)
tutu = texinfo_with_menus_re.findall (texifile)
- node_trigger = False
+ node_just_defined = ''
for item in tutu:
if item[0] == '*':
g.write ('* ' + item[1] + '::\n')
g.write ('@menu\n')
elif item[2] == 'end menu':
g.write ('@end menu\n\n')
+ elif item[2] == 'documentlanguage':
+ g.write ('@documentlanguage ' + doclang + '\n')
else:
- g.write ('@' + item[2] + ' ' + item[3] + '\n')
- if node_trigger:
+ space = ' '
+ if item[3].startswith ('{') or not item[3].strip ():
+ space = ''
+ g.write ('@' + item[2] + space + item[3] + '\n')
+ if node_just_defined:
+ g.write ('@translationof ' + node_just_defined + '\n')
g.write (n_blurb)
- node_trigger = False
+ node_just_defined = ''
+ if head_only and inclusion_level == 1:
+ break
elif item[2] == 'include':
includes.append (item[3])
else:
output_file.write ('# @' + item[2] + ' in ' + \
printedfilename + '\n_(r"' + item[3].strip () + '")\n')
if item[2] == 'node':
- node_trigger = True
- g.write (end_blurb)
+ node_just_defined = item[3].strip ()
+ if not head_only:
+ g.write (end_blurb)
g.close ()
- elif output_file:
+ elif output_file and scan_ly:
toto = texinfo_re.findall (texifile)
for item in toto:
if item[0] == 'include':
elif item[2] == 'rglos':
output_file.write ('# @rglos in ' + printedfilename + '\n_(r"' + item[3] + '")\n')
else:
- output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip () + '")\n')
+ output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip ().replace ('\\', r'\\') + '")\n')
- if process_includes:
+ if process_includes and (not head_only or inclusion_level < 1):
dir = os.path.dirname (texifilename)
for item in includes:
- process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb, write_skeleton, topfile, output_file, scan_ly)
+ process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb,
+ write_skeleton, topfile, output_file, scan_ly, inclusion_level + 1)
except IOError, (errno, strerror):
sys.stderr.write ("I/O error(%s): %s: %s\n" % (errno, texifilename, strerror))
node_list.write ('# -*- coding: utf-8 -*-\n')
for texi_file in texi_files:
# Ugly: scan ly comments and variable names only in English docs
- is_english_doc = 'Documentation/user' in texi_file
+ is_english_doc = (
+ True
+ and not 'Documentation/de/' in texi_file
+ and not 'Documentation/es/' in texi_file
+ and not 'Documentation/fr/' in texi_file
+ and not 'Documentation/ja/' in texi_file
+ and not 'Documentation/hu/' in texi_file
+ and not 'Documentation/it/' in texi_file
+ and not 'Documentation/nl/' in texi_file
+ and not 'Documentation/po/' in texi_file
+ )
process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
os.path.basename (texi_file), node_list,
scan_ly=is_english_doc)
for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'):
node_list.write ('_(r"' + word + '")\n')
node_list.close ()
- os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename)
+ os.system ('xgettext --keyword=_doc -c -L Python --no-location -o ' + output_name + ' ' + node_list_filename)
else:
for texi_file in texi_files:
process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,