# NOTE(review): partial view -- the imports and the closing quotes of the
# multi-line strings below fall in lines missing from this chunk.
8 sys.path.append ('python')
# Destination of generated snippets, and the hand-maintained 'new' tree.
11 DEST = os.path.join ('Documentation', 'snippets')
12 NEW_LYS = os.path.join ('Documentation', 'snippets', 'new')
# One translated-texidoc directory per language known to langdefs.
13 TEXIDOCS = [os.path.join ('Documentation', language_code, 'texidocs')
14 for language_code in langdefs.LANGDICT]
# Usage text and the two generated-file banners (LSR-sourced vs.
# snippets/new-sourced); their terminators are outside this view.
16 USAGE = ''' Usage: makelsr.py [LSR_SNIPPETS_DIR]
17 This script must be run from top of the source tree;
18 it updates snippets %(DEST)s with snippets
19 from %(NEW_LYS)s or LSR_SNIPPETS_DIR.
20 If a snippet is present in both directories, the one
21 from %(NEW_LYS)s is preferred.
24 LY_HEADER_LSR = '''%% DO NOT EDIT this file manually; it is automatically
25 %% generated from LSR http://lsr.dsi.unimi.it
26 %% Make any changes in LSR itself, or in Documentation/snippets/new/ ,
27 %% and then run scripts/auxiliar/makelsr.py
29 %% This file is in the public domain.
32 LY_HEADER_NEW = '''%% DO NOT EDIT this file manually; it is automatically
34 %% Make any changes in Documentation/snippets/new/
35 %% and then run scripts/auxiliar/makelsr.py
37 %% This file is in the public domain.
# Print the usage banner to stderr and exit with status n; the exit call
# presumably follows in the lines missing from this view -- TODO confirm.
40 def exit_with_usage (n=0):
41 sys.stderr.write (USAGE)
# Command-line handling: optional LSR snippets dir as argv[1]; the script
# must run from the source-tree top (DEST and NEW_LYS must exist).
# Several else/exit branches are in lines missing from this view.
46 if len (sys.argv) >= 2:
48 if not (os.path.isdir (in_dir)):
49 sys.stderr.write (in_dir + ' is not a directory.\n')
51 if len (sys.argv) >= 3:
53 if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)):
# LSR tags are the names of the subdirectories of in_dir.
55 TAGS = os.listdir (in_dir)
59 # which convert-ly to use
60 if os.path.isfile("out/bin/convert-ly"):
62 elif os.path.isfile("build/out/bin/convert-ly"):
63 conv_path='build/out/bin/'
# Both tools are taken from the same build-directory prefix.
66 convert_ly=conv_path+'convert-ly'
67 lilypond_bin=conv_path+'lilypond'
69 print 'using '+convert_ly
# mark the section that will be printed verbatim by lilypond-book
#
# NOTE(review): the original used '(\\header ...' in a NON-raw string, so
# the regex engine received the unknown escape '\h' (an error on modern
# Python; historically parsed as a literal 'h', silently dropping the
# intended backslash).  A raw string with an escaped backslash matches
# the literal '\header' keyword as intended.
end_header_re = re.compile (r'(\\header {.+?doctitle = ".+?})\n', re.M | re.S)
# doctitle fields (optionally language-suffixed, e.g. doctitlees) and the
# two Texinfo quoting commands that may appear inside them.
doctitle_re = re.compile (r'(doctitle[a-zA-Z_]{0,6}\s*=\s*")((?:\\"|[^"\n])*)"')
texinfo_q_re = re.compile (r'@q{(.*?)}')
texinfo_qq_re = re.compile (r'@qq{(.*?)}')

def doctitle_sub (title_match):
    """Normalize one doctitle match for use as a Texinfo node name.

    Commas are dropped (forbidden in Texinfo node names), @q{...} becomes
    `...' and @qq{...} becomes escaped double quotes.  Returns the whole
    rewritten field, closing quote included.
    """
    prefix = title_match.group (1)
    # Comma forbidden in Texinfo node name
    cleaned = title_match.group (2).replace (',', '')
    cleaned = texinfo_q_re.sub (r"`\1'", cleaned)
    cleaned = texinfo_qq_re.sub (r'\"\1\"', cleaned)
    return prefix + cleaned + '"'
def mark_verbatim_section (ly_code):
    """Tag the end of the first header block with '% begin verbatim'.

    Everything after the marker is printed verbatim by lilypond-book
    (per the comment on end_header_re).  Only the first match is
    rewritten; ly_code comes back unchanged when end_header_re does
    not match.
    """
    marked = end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1)
    return marked
91 # '% LSR' comments are to be stripped
92 lsr_comment_re = re.compile (r'\s*%+\s*LSR.*')
# Opening of a \header block; used below as an insertion anchor.
93 begin_header_re = re.compile (r'\\header\s*{', re.M)
# Captures the version string of a \version "..." statement.
94 ly_new_version_re = re.compile (r'\\version\s*"(.+?)"')
# Trailing spaces/tabs just before a newline.
95 strip_white_spaces_re = re.compile (r'[ \t]+(?=\n)')
97 # add tags to ly files from LSR
# Inserts an 'lsrtags = "..."' field right after '\header {'; the
# remaining arguments of this sub() call are outside this view.
98 def add_tags (ly_code, tags):
99 return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n',
# for snippets from input/new, add message for earliest working version
def add_version (ly_code):
    """Return a one-line comment naming the version found in ly_code.

    Assumes ly_code contains a version statement matched by
    ly_new_version_re; raises AttributeError otherwise (search()
    returns None).
    """
    found_version = ly_new_version_re.search (ly_code).group (1)
    return '%% Note: this file works from version ' + found_version + '\n'
# Matches one translated texidocLL/doctitleLL chunk (optionally preceded
# by a 'Translation of GIT committish' comment); the captured group is
# the 2-4 letter language suffix.  The lookahead stops at the next
# committish comment, the verbatim marker, an untranslated field, or
# end of file.
107 s = 'Translation of GIT [Cc]ommittish'
108 texidoc_chunk_re = re.compile (r'^(?:%+\s*' + s + \
109 r'.+)?\s*(?:texidoc|doctitle)([a-zA-Z]{2,4})\s+=(?:.|\n)*?(?=%+\s*' + \
110 s + r'|\n\} % begin verbatim|\n (?:doctitle|texidoc|lsrtags) |$(?!.|\n))', re.M)
# Substitution callback for texidoc_chunk_re: replace a translated chunk
# with the current contents of Documentation/<lang>/texidocs/<name>.texidoc
# and record the language in visited_languages.  The fallback taken when
# that file is missing is in lines outside this view.
112 def update_translated_texidoc (m, snippet_path, visited_languages):
113 base = os.path.splitext (os.path.basename (snippet_path))[0]
# group(1) is the language suffix captured by texidoc_chunk_re.
114 language_code = m.group (1)
115 visited_languages.append (language_code)
116 texidoc_path = os.path.join ('Documentation', language_code,
117 'texidocs', base + '.texidoc')
118 if os.path.isfile (texidoc_path):
119 return open (texidoc_path).read ()
def escape_backslashes_in_header(snippet):
    """Double the backslash of @code{\\cmd} commands inside the header.

    The header section -- everything from '\\header {' up to the
    '} % begin verbatim' marker -- is fed through one more level of
    (un)escaping downstream, so a single backslash before a LilyPond
    command name inside @code{...} must be doubled here.  The snippet
    is returned unchanged when either delimiter is absent.
    """
    # NOTE(review): original literals were non-raw, producing the invalid
    # string escape '\h' and regex escapes that only worked by accident;
    # raw strings express the same bytes explicitly.
    header_start = snippet.find (r'\header {')
    header_end = snippet.find ('} % begin verbatim')
    # Original ASSUMEd the \header exists; guard so we never slice with
    # find()'s -1 failure value and corrupt the snippet.
    if header_start == -1 or header_end == -1:
        return snippet
    header = snippet[header_start:header_end]
    # Pattern: '@code{' + one backslash + an ASCII letter.  In the
    # replacement, r'\\\\' emits two literal backslashes and r'\1'
    # restores the captured letter -- i.e. '@code{\X' -> '@code{\\X'.
    new_header = re.sub (r'@code\{\\([a-zA-Z])', r'@code{\\\\\1', header)
    return snippet[:header_start] + new_header + snippet[header_end:]
# Copy one snippet from srcdir into DEST, fixing it up on the way:
# splice in translated texidocs, normalize the doctitle, add the
# generated-file banner, mark the verbatim section, strip '% LSR'
# comments and trailing whitespace, escape header backslashes, then run
# convert-ly (and, for LSR-sourced snippets, a -dsafe lilypond run).
# Several lines of the original body are missing from this view.
137 def copy_ly (srcdir, name, tags):
140 dest = os.path.join (DEST, name)
141 tags = ', '.join (tags)
142 s = open (os.path.join (srcdir, name)).read ()
143 sys.stderr.write ('\nmakelsr.py: reading ' + os.path.join (srcdir, name) + '\n')
# Insert every available translation right after '\header {'.
145 for path in TEXIDOCS:
146 texidoc_translation_path = \
147 os.path.join (path, os.path.splitext (name)[0] + '.texidoc')
148 if os.path.exists (texidoc_translation_path):
149 texidoc_translation = open (texidoc_translation_path).read ()
150 # Since we want to insert the translations verbatim using a
151 # regexp, \\ is understood as ONE escaped backslash. So we have
152 # to escape those backslashes once more...
153 texidoc_translation = texidoc_translation.replace ('\\', '\\\\')
154 s = begin_header_re.sub ('\\g<0>\n' + texidoc_translation, s, 1)
156 s = doctitle_re.sub (doctitle_sub, s)
# LSR-sourced snippet: LSR banner plus lsrtags field.  The else line
# introducing the snippets/new branch is outside this view.
157 if in_dir and in_dir in srcdir:
158 s = LY_HEADER_LSR + add_tags (s, tags)
# Presumably the else branch: new-snippet banner + earliest-version note.
160 s = LY_HEADER_NEW + add_version (s) + s
162 s = mark_verbatim_section (s)
163 s = lsr_comment_re.sub ('', s)
164 s = strip_white_spaces_re.sub ('', s)
165 s = escape_backslashes_in_header (s)
166 open (dest, 'w').write (s)
167 sys.stderr.write ('makelsr.py: writing ' + dest + '\n')
# Run convert-ly in place; the check of its exit status that feeds
# 'unconverted' is in lines outside this view.
169 e = os.system (convert_ly+(" -d -e '%s'" % dest))
171 unconverted.append (dest)
172 if os.path.exists (dest + '~'):
173 os.remove (dest + '~')
174 # no need to check snippets from input/new
175 if in_dir and in_dir in srcdir:
# -dsafe trial run; the handling of its result (presumably the 'unsafe'
# list dumped at the end of the script) is outside this view.
176 e = os.system ("%s -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" %(lilypond_bin, dest))
# Build the snippet/tag maps from an LSR tree where each tag is a
# subdirectory of src.  Most of the body (the loops over tags and files,
# the initializations of s/l, and the return) is outside this view.
180 def read_source_with_dirs (src):
184 srcdir = os.path.join (src, tag)
# Per-tag set of basenames of all .ly files found under the tag's dir.
185 l[tag] = set (map (os.path.basename,
186 glob.glob (os.path.join (srcdir, '*.ly'))))
# Per-file record: source directory and single-tag list.
191 s[f] = (srcdir, [tag])
# Captures the comma-separated tag list of an 'lsrtags = "..."' field.
195 tags_re = re.compile ('lsrtags\\s*=\\s*"(.+?)"')
# Build the snippet/tag maps from Documentation/snippets/new, where tags
# are declared inside each file.  Files without an lsrtags field end up
# in notags_files.  Some lines (including the 'if m:' guard and the
# return) are outside this view.
197 def read_source (src):
199 l = dict ([(tag, set()) for tag in TAGS])
200 for f in glob.glob (os.path.join (src, '*.ly')):
201 basename = os.path.basename (f)
202 m = tags_re.search (open (f, 'r').read ())
204 file_tags = [tag.strip() for tag in m.group (1). split(',')]
205 s[basename] = (src, file_tags)
# Only tags present in TAGS get the basename added to their set.
206 [l[tag].add (basename) for tag in file_tags if tag in TAGS]
208 notags_files.append (f)
# Write file_list to 'file', one name per line, sorted.  With
# update=True the names already in the file are merged in first.  The
# guard on 'update', the write-mode open(), and the non-update branch
# are in lines outside this view.
212 def dump_file_list (file, file_list, update=False):
214 old_list = set (open (file, 'r').read ().splitlines ())
215 old_list.update (file_list)
216 new_list = list (old_list)
220 f.write ('\n'.join (sorted (new_list)) + '\n')
# Refresh the embedded translations of an existing snippet in place:
# replace chunks matched by texidoc_chunk_re, then insert the texidoc of
# every language not yet visited right after '\header {'.  Several lines
# of the original body (including the rest of the lambda's arguments)
# are missing from this view.
222 def update_ly_in_place (snippet_path):
223 visited_languages = []
224 contents = open (snippet_path).read ()
225 contents = texidoc_chunk_re.sub \
226 (lambda m: update_translated_texidoc (m,
230 need_line_break_workaround = False
231 for language_code in langdefs.LANGDICT:
232 if not language_code in visited_languages:
233 base = os.path.splitext (os.path.basename (snippet_path))[0]
234 texidoc_path = os.path.join ('Documentation', language_code,
235 'texidocs', base + '.texidoc')
236 if os.path.isfile (texidoc_path):
237 texidoc_translation = open (texidoc_path).read ()
# Doubled backslashes survive the verbatim regexp insertion (same
# reasoning as in copy_ly).
238 texidoc_translation = texidoc_translation.replace ('\\', '\\\\')
239 contents = begin_header_re.sub ('\\g<0>\n' + texidoc_translation, contents, 1)
241 need_line_break_workaround = True
242 contents = doctitle_re.sub (doctitle_sub, contents)
243 contents = escape_backslashes_in_header (contents)
245 # workaround for a bug in the regex's that I'm not smart
246 # enough to figure out. -gp
# Re-introduce a line break before every committish comment except the
# first (the +5 offset keeps the first marker intact).
247 if need_line_break_workaround:
248 first_translated = contents.find('%% Translation of')
249 keep = contents[:first_translated+5]
250 contents = keep + contents[first_translated+5:].replace('%% Translation of', '\n%% Translation of')
252 open (snippet_path, 'w').write (contents)
255 ## clean out existing lys and generated files
256 map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
257 glob.glob (os.path.join (DEST, '*.snippet-list')))
259 # read LSR source where tags are defined by subdirs
260 snippets, tag_lists = read_source_with_dirs (in_dir)
262 # read input/new where tags are directly defined
263 s, l = read_source (NEW_LYS)
# Merge the new-tree per-tag sets into the LSR ones; the surrounding
# snippet-merge loop and the else branch (no in_dir given, where only
# NEW_LYS is read) are partly outside this view.
266 tag_lists[t].update (l[t])
268 snippets, tag_lists = read_source (NEW_LYS)
269 ## update texidocs of snippets that don't come from NEW_LYS
270 for snippet_path in glob.glob (os.path.join (DEST, '*.ly')):
271 if not os.path.basename (snippet_path) in snippets:
272 update_ly_in_place (snippet_path)
# Regenerate every snippet and the per-tag .snippet-list indexes; the
# lists are merged (update=True) only when no LSR dir was given.
274 for (name, (srcdir, tags)) in snippets.items ():
275 copy_ly (srcdir, name, tags)
276 for (tag, file_set) in tag_lists.items ():
277 dump_file_list (os.path.join (DEST, tag + '.snippet-list'),
278 file_set, update=not(in_dir))
# Final reports: convert-ly failures, tagless files, and the unsafe
# list for manual review.  The guards around these writes are outside
# this view; the closing of the final string is as well.
280 sys.stderr.write ('These files could not be converted successfully by convert-ly:\n')
281 sys.stderr.write ('\n'.join (unconverted) + '\n\n')
283 sys.stderr.write ('No tags could be found in these files:\n')
284 sys.stderr.write ('\n'.join (notags_files) + '\n\n')
286 dump_file_list ('lsr-unsafe.txt', unsafe)
287 sys.stderr.write ('''
289 Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY!
291 xargs git diff HEAD < lsr-unsafe.txt