A number of Python scripts handle a part of the documentation
translation process.
All scripts used to maintain the translations
-are located in scripts/aux/:
+are located in scripts/auxiliar/:
* check_translation.py -- show diff to update a translation
* texi-langutils.py -- quickly and dirtily parse Texinfo files to
* mass-link.py -- link or symlink files between English documentation
and documentation in other languages
-Python modules used by scripts in scripts/aux/ or scripts/build/ (but
-not by installed Python scripts) are located in python/aux/:
+Python modules used by scripts in scripts/auxiliar/ or scripts/build/ (but
+not by installed Python scripts) are located in python/auxiliar/:
* manuals_definitions.py -- define manual names and names of
cross-reference Texinfo macros
* buildlib.py -- common functions (read piped output
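
For example, to see how the English original changed since a translated
file was last updated (the file path here is hypothetical), run from the
top of the source tree:

  scripts/auxiliar/check_translation.py Documentation/fr/user/foo.itely

check_translation.py looks up the 'GIT committish: <hash>' recorded in
the header of the translated file and prints the git diff of the English
original since that committish.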
To check the coverage of the test suite, do the following:
@example
-./scripts/aux/build-coverage.sh
+./scripts/auxiliar/build-coverage.sh
@emph{# uncovered files, least covered first}
-./scripts/aux/coverage.py --summary out-cov/*.cc
+./scripts/auxiliar/coverage.py --summary out-cov/*.cc
@emph{# consecutive uncovered lines, longest first}
-./scripts/aux/coverage.py --uncovered out-cov/*.cc
+./scripts/auxiliar/coverage.py --uncovered out-cov/*.cc
@end example
NCSB_FILE=`$FCMATCH --verbose "Century Schoolbook L:style=$style" | grep 'file:' | grep -v "\.ttf"`
NCSB_FILE=`echo $NCSB_FILE | sed 's/^.*"\(.*\)".*$/\1/g'`
- NCSB_FILE=`$PYTHON "$srcdir/scripts/aux/readlink.py" $NCSB_FILE`
+ NCSB_FILE=`$PYTHON "$srcdir/scripts/auxiliar/readlink.py" $NCSB_FILE`
NCSB_SOURCE_FILES="$NCSB_FILE $NCSB_SOURCE_FILES"
done
else
To update this directory, run from the top of the source tree:
-scripts/aux/makelsr.py DIR
+scripts/auxiliar/makelsr.py DIR
where DIR is the directory unpacked from the lsr-snippets-doc-DATE tarball
available from http://lsr.dsi.unimi.it/download.
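
For example, if the tarball was unpacked into ~/lsr-snippets-doc
(a hypothetical location), run:

scripts/auxiliar/makelsr.py ~/lsr-snippets-doc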
#
buildscript-dir = $(top-build-dir)/scripts/build/$(outconfbase)
-auxpython-dir = $(src-depth)/python/aux
-auxscript-dir = $(src-depth)/scripts/aux
+auxpython-dir = $(src-depth)/python/auxiliar
+auxscript-dir = $(src-depth)/scripts/auxiliar
script-dir = $(src-depth)/scripts
input-dir = $(src-depth)/input
depth = ..
-SUBDIRS=aux
+SUBDIRS=auxiliar
STEPMAKE_TEMPLATES=c python-module install-out po
+++ /dev/null
-depth=../..
-
-EXTRA_DIST_FILES = $(call src-wildcard,*.py)
-
-include $(depth)/make/stepmake.make
-
-default:
-
-local-clean:
- rm -f *.pyc
+++ /dev/null
-#!@PYTHON@
-
-import subprocess
-import re
-import sys
-
-verbose = False
-
-def read_pipe (command):
- child = subprocess.Popen (command,
- stdout = subprocess.PIPE,
- stderr = subprocess.PIPE,
- shell = True)
- (output, error) = child.communicate ()
- code = str (child.wait ())
- if not child.stdout or child.stdout.close ():
- print "pipe failed: %(command)s" % locals ()
- if code != '0':
- error = code + ' ' + error
- return (output, error)
-
-revision_re = re.compile ('GIT [Cc]ommittish:\s+([a-f0-9]+)')
-vc_diff_cmd = 'git diff %(color_flag)s %(revision)s HEAD -- %(original)s | cat'
-
-def check_translated_doc (original, translated_file, translated_contents, color=False):
- m = revision_re.search (translated_contents)
- if not m:
- sys.stderr.write ('error: ' + translated_file + \
- ": no 'GIT committish: <hash>' found.\nPlease check " + \
- 'the whole file against the original in English, then ' + \
- 'fill in HEAD committish in the header.\n')
- sys.exit (1)
- revision = m.group (1)
-
- if color:
- color_flag = '--color'
- else:
- color_flag = '--no-color'
- c = vc_diff_cmd % vars ()
- if verbose:
- sys.stderr.write ('running: ' + c)
- return read_pipe (c)
+++ /dev/null
-#!/usr/bin/python
-
-# This module is imported by check_texi_refs.py
-
-references_dict = {
- 'lilypond': 'ruser',
- 'lilypond-learning': 'rlearning',
- 'lilypond-program': 'rprogram',
- 'lilypond-snippets': 'rlsr',
- 'music-glossary': 'rglos',
- 'lilypond-internals': 'rinternals' }
+++ /dev/null
-#!@PYTHON@
-
-import re
-import os
-
-def new_link_path (link, dir, r):
- l = link.split ('/')
- d = dir.split ('/')
- i = 0
- while i < len(d) and i < len(l) and l[i] == '..':
- if r.match (d[i]):
- del l[i]
- else:
- i += 1
- return '/'.join ([x for x in l if not r.match (x)])
-
-def walk_tree (tree_roots = [],
- process_dirs = '.*',
- exclude_dirs = '',
- find_files = '.*',
- exclude_files = ''):
- """Walk directory trees and.returns (dirs, symlinks, files, extra_files) tuple.
-
- Arguments:
- tree_roots=DIRLIST use DIRLIST as tree roots list
- process_dir=PATTERN only process files in directories named PATTERN
- exclude_dir=PATTERN don't recurse into directories named PATTERN
- find_files=PATTERN filters files which are hardlinked
- exclude_files=PATTERN exclude files named PATTERN
- """
- find_files_re = re.compile (find_files)
- exclude_dirs_re = re.compile (exclude_dirs)
- exclude_files_re = re.compile (exclude_files)
- process_dirs_re = re.compile (process_dirs)
-
- dirs_paths = []
- symlinks_paths = []
- files_paths = []
-
- for d in tree_roots:
- for current_dir, dirs, files in os.walk(d):
- i = 0
- while i < len(dirs):
- if exclude_dirs_re.search (os.path.join (current_dir, dirs[i])):
- del dirs[i]
- else:
- p = os.path.join (current_dir, dirs[i])
- if os.path.islink (p):
- symlinks_paths.append (p)
- i += 1
- if not process_dirs_re.search (current_dir):
- continue
- dirs_paths.append (current_dir)
- for f in files:
- if exclude_files_re.match (f):
- continue
- p = os.path.join (current_dir, f)
- if os.path.islink (p):
- symlinks_paths.append (p)
- elif find_files_re.match (f):
- files_paths.append (p)
- return (dirs_paths, symlinks_paths, files_paths)
+++ /dev/null
-#!@PYTHON@
-
-"""
-Postprocess HTML files:
-add footer, tweak links, add language selection menu.
-"""
-import re
-import os
-import time
-import operator
-
-import langdefs
-
-# Keep the documentation tarball reasonably small by not copying
-# almost-duplicate files; see process_links()
-non_copied_pages = ['Documentation/user/out-www/lilypond-big-page',
- 'Documentation/user/out-www/lilypond-internals-big-page',
- 'Documentation/user/out-www/lilypond-learning-big-page',
- 'Documentation/user/out-www/lilypond-program-big-page',
- 'Documentation/user/out-www/music-glossary-big-page',
- 'out-www/examples',
- 'Documentation/topdocs',
- 'Documentation/bibliography',
- 'Documentation/out-www/THANKS',
- 'Documentation/out-www/DEDICATION',
- 'Documentation/out-www/devel',
- 'input/']
-
-def _doc (s):
- return s
-
-header = r"""
-"""
-
-footer = '''
-<div class="footer">
-<p class="footer_version">
-%(footer_name_version)s
-</p>
-<p class="footer_report">
-%(footer_report_links)s
-</p>
-</div>
-'''
-footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).')
-# ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process
-footer_report_links = _doc ('Your <a href="%(suggest_Docs_url)s">suggestions for the documentation</a> are welcome, please report errors to our <a href="%(mail_address_url)s">bug list</a>.')
-
-
-mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs'
-suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding'
-
-header_tag = '<!-- header_tag -->'
-header_tag_re = re.compile (header_tag)
-
-footer_tag = '<!-- footer_tag -->'
-footer_tag_re = re.compile (footer_tag)
-
-lang_available = _doc ("Other languages: %s.")
-browser_lang = _doc ('About <A HREF="%s">automatic language selection</A>.')
-browser_language_url = "/web/about/browser-language"
-
-LANGUAGES_TEMPLATE = '''
-<p id="languages">
- %(language_available)s
- <br/>
- %(browser_language)s
-</p>
-'''
-
-
-html_re = re.compile ('(.*?)(?:[.]([^/.]*))?[.]html$')
-pages_dict = {}
-
-def build_pages_dict (filelist):
- """Build dictionary of available translations of each page"""
- global pages_dict
- for f in filelist:
- m = html_re.match (f)
- if m:
- g = m.groups()
- if len (g) <= 1 or g[1] == None:
- e = ''
- else:
- e = g[1]
- if not g[0] in pages_dict:
- pages_dict[g[0]] = [e]
- else:
- pages_dict[g[0]].append (e)
-
-def source_links_replace (m, source_val):
- return 'href="' + os.path.join (source_val, m.group (1)) + '"'
-
-splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|\
-Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|\
-lilypond-learning))/')
-
-snippets_ref_re = re.compile (r'href="(\.\./)?lilypond-snippets')
-user_ref_re = re.compile ('href="(?:\.\./)?lilypond\
-(-internals|-learning|-program|(?!-snippets))')
-
-docindex_link_re = re.compile (r'href="index.html"')
-
-
-## Windows does not support symlinks.
-# This function avoids creating symlinks for split HTML manuals
-# Get rid of symlinks in GNUmakefile.in (local-WWW-post)
-# this also fixes missing PNGs only present in translated docs
-def hack_urls (s, prefix):
- if splitted_docs_re.match (prefix):
- s = re.sub ('(href|src)="(../lily-.*?|.*?[.]png)"', '\\1="../\\2"', s)
-
- # fix xrefs between documents in different directories ad hoc
- if 'user/out-www/lilypond' in prefix:
- s = snippets_ref_re.sub ('href="source/input/lsr/lilypond-snippets', s)
- elif 'input/lsr' in prefix:
- s = user_ref_re.sub ('href="source/Documentation/user/lilypond\\1', s)
-
- # we also need to replace in the lsr, which is already processed above!
- if 'input/' in prefix or 'Documentation/topdocs' in prefix:
- # fix the link from the regtest, lsr and topdoc pages to the doc index
- # (rewrite prefix to obtain the relative path of the doc index page)
- rel_link = re.sub (r'out-www/.*$', '', prefix)
- rel_link = re.sub (r'[^/]*/', '../', rel_link)
- if 'input/regression' in prefix:
- indexfile = "Documentation/devel"
- else:
- indexfile = "index"
- s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s)
-
- source_path = os.path.join (os.path.dirname (prefix), 'source')
- if not os.path.islink (source_path):
- return s
- source_val = os.readlink (source_path)
- return re.sub ('href="source/(.*?)"', lambda m: source_links_replace (m, source_val), s)
-
-body_tag_re = re.compile ('(?i)<body([^>]*)>')
-html_tag_re = re.compile ('(?i)<html>')
-doctype_re = re.compile ('(?i)<!DOCTYPE')
-doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
-css_re = re.compile ('(?i)<link rel="stylesheet" type="text/css" ([^>]*)href="[^">]*?lilypond.*\.css"([^>]*)>')
-end_head_tag_re = re.compile ('(?i)</head>')
-css_link = """ <link rel="stylesheet" type="text/css" title="Patrick McCarty's design" href="%(rel)sDocumentation/lilypond-mccarty.css">
- <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond.css" title="Andrew Hawryluk's design">
- <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond-blue.css" title="Kurt Kroon's blue design">
- <!--[if lte IE 7]>
- <link href="%(rel)sDocumentation/lilypond-ie-fixes.css" rel="stylesheet" type="text/css">
- <![endif]-->
-"""
-
-
-def add_header (s, prefix):
- """Add header (<body>, doctype and CSS)"""
- if header_tag_re.search (s) == None:
- body = '<body\\1>'
- (s, n) = body_tag_re.subn (body + header, s, 1)
- if not n:
- (s, n) = html_tag_re.subn ('<html>' + header, s, 1)
- if not n:
- s = header + s
-
- s = header_tag + '\n' + s
-
- if doctype_re.search (s) == None:
- s = doctype + s
-
- if css_re.search (s) == None:
- depth = (prefix.count ('/') - 1) * '../'
- s = end_head_tag_re.sub ((css_link % {'rel': depth}) + '</head>', s)
- return s
-
-title_tag_re = re.compile ('.*?<title>(.*?)</title>', re.DOTALL)
-AT_web_title_re = re.compile ('@WEB-TITLE@')
-
-def add_title (s):
- # urg
- # maybe find first node?
- fallback_web_title = '-- --'
- m = title_tag_re.match (s)
- if m:
- fallback_web_title = m.group (1)
- s = AT_web_title_re.sub (fallback_web_title, s)
- return s
-
-footer_insert_re = re.compile ('<!--\s*FOOTER\s*-->')
-end_body_re = re.compile ('(?i)</body>')
-end_html_re = re.compile ('(?i)</html>')
-
-def add_footer (s, footer_text):
- """add footer"""
- (s, n) = footer_insert_re.subn (footer_text + '\n' + '<!-- FOOTER -->', s, 1)
- if not n:
- (s, n) = end_body_re.subn (footer_text + '\n' + '</body>', s, 1)
- if not n:
- (s, n) = end_html_re.subn (footer_text + '\n' + '</html>', s, 1)
- if not n:
- s += footer_text + '\n'
- return s
-
-def find_translations (prefix, lang_ext):
- """find available translations of a page"""
- available = []
- missing = []
- for l in langdefs.LANGUAGES:
- e = l.webext
- if lang_ext != e:
- if e in pages_dict[prefix]:
- available.append (l)
- elif lang_ext == '' and l.enabled and reduce (operator.and_,
- [not prefix.startswith (s)
- for s in non_copied_pages]):
- # English version of missing translated pages will be written
- missing.append (e)
- return available, missing
-
-online_links_re = re.compile ('''(href|src)=['"]\
-((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\
-([.]html)(#[^"']*|)['"]''')
-offline_links_re = re.compile ('href=[\'"]\
-((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
-big_page_name_re = re.compile ('''(.+?)-big-page''')
-
-def process_i18n_big_page_links (match, prefix, lang_ext):
- big_page_name = big_page_name_re.match (match.group (1))
- if big_page_name:
- destination_path = os.path.normpath (os.path.join (os.path.dirname (prefix),
- big_page_name.group (0)))
- if not lang_ext in pages_dict[destination_path]:
- return match.group (0)
- return 'href="' + match.group (1) + '.' + lang_ext \
- + match.group (2) + match.group (3) + '"'
-
-def process_links (s, prefix, lang_ext, file_name, missing, target):
- page_flavors = {}
- if target == 'online':
- # Strip .html, suffix for auto language selection (content
- # negotiation). The menu must keep the full extension, so do
- # this before adding the menu.
- page_flavors[file_name] = \
- [lang_ext, online_links_re.sub ('\\1="\\2\\4"', s)]
- elif target == 'offline':
- # in LANG doc index: don't rewrite .html suffixes
- # as not all .LANG.html pages exist;
- # the doc index should be translated and contain links with the right suffixes
- if prefix == 'Documentation/out-www/index':
- page_flavors[file_name] = [lang_ext, s]
- elif lang_ext == '':
- page_flavors[file_name] = [lang_ext, s]
- for e in missing:
- page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = \
- [e, offline_links_re.sub ('href="\\1.' + e + '\\2\\3"', s)]
- else:
- # For saving bandwidth and disk space, we don't duplicate big pages
- # in English, so we must process translated big pages links differently.
- if 'big-page' in prefix:
- page_flavors[file_name] = \
- [lang_ext,
- offline_links_re.sub \
- (lambda match: process_i18n_big_page_links (match, prefix, lang_ext),
- s)]
- else:
- page_flavors[file_name] = \
- [lang_ext,
- offline_links_re.sub ('href="\\1.' + lang_ext + '\\2\\3"', s)]
- return page_flavors
-
-def add_menu (page_flavors, prefix, available, target, translation):
- for k in page_flavors:
- language_menu = ''
- languages = ''
- if page_flavors[k][0] != '':
- t = translation[page_flavors[k][0]]
- else:
- t = _doc
- for lang in available:
- lang_file = lang.file_name (os.path.basename (prefix), '.html')
- if language_menu != '':
- language_menu += ', '
- language_menu += '<a href="%s">%s</a>' % (lang_file, t (lang.name))
- if target == 'offline':
- browser_language = ''
- elif target == 'online':
- browser_language = t (browser_lang) % browser_language_url
- if language_menu:
- language_available = t (lang_available) % language_menu
- languages = LANGUAGES_TEMPLATE % vars ()
- page_flavors[k][1] = add_footer (page_flavors[k][1], languages)
- return page_flavors
-
-
-def process_html_files (package_name = '',
- package_version = '',
- target = 'offline',
- name_filter = lambda s: s):
- """Add header, footer and tweak links to a number of HTML files
-
- Arguments:
- package_name=NAME set package_name to NAME
- package_version=VERSION set package version to VERSION
- targets=offline|online set page processing depending on the target
- offline is for reading HTML pages locally
- online is for hosting the HTML pages on a website with content
- negotiation
- name_filter a HTML file name filter
- """
- translation = langdefs.translation
- localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
-
- if "http://" in mail_address:
- mail_address_url = mail_address
- else:
- mail_address_url= 'mailto:' + mail_address
-
- versiontup = package_version.split ('.')
- branch_str = _doc ('stable-branch')
- if int (versiontup[1]) % 2:
- branch_str = _doc ('development-branch')
-
- # Initialize dictionaries for string formatting
- subst = {}
- subst[''] = dict ([i for i in globals ().items() if type (i[1]) is str])
- subst[''].update (dict ([i for i in locals ().items() if type (i[1]) is str]))
- for l in translation:
- e = langdefs.LANGDICT[l].webext
- if e:
- subst[e] = {}
- for name in subst['']:
- subst[e][name] = translation[l] (subst[''][name])
- # Do deeper string formatting as early as possible,
- # so only one '%' formatting pass is needed later
- for e in subst:
- subst[e]['footer_name_version'] = subst[e]['footer_name_version'] % subst[e]
- subst[e]['footer_report_links'] = subst[e]['footer_report_links'] % subst[e]
-
- for prefix, ext_list in pages_dict.items ():
- for lang_ext in ext_list:
- file_name = langdefs.lang_file_name (prefix, lang_ext, '.html')
- in_f = open (file_name)
- s = in_f.read()
- in_f.close()
-
- s = s.replace ('%', '%%')
- s = hack_urls (s, prefix)
- s = add_header (s, prefix)
-
- ### add footer
- if footer_tag_re.search (s) == None:
- s = add_footer (s, footer_tag + footer)
-
- available, missing = find_translations (prefix, lang_ext)
- page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target)
- # Add menu after stripping: must not have autoselection for language menu.
- page_flavors = add_menu (page_flavors, prefix, available, target, translation)
- for k in page_flavors:
- page_flavors[k][1] = page_flavors[k][1] % subst[page_flavors[k][0]]
- out_f = open (name_filter (k), 'w')
- out_f.write (page_flavors[k][1])
- out_f.close()
- # if the page is translated, a .en.html symlink is necessary for content negotiation
- if target == 'online' and ext_list != ['']:
- os.symlink (os.path.basename (prefix) + '.html', name_filter (prefix + '.en.html'))
--- /dev/null
+depth=../..
+
+EXTRA_DIST_FILES = $(call src-wildcard,*.py)
+
+include $(depth)/make/stepmake.make
+
+default:
+
+local-clean:
+ rm -f *.pyc
--- /dev/null
+#!@PYTHON@
+
+import subprocess
+import re
+import sys
+
+verbose = False
+
+def read_pipe (command):
+ child = subprocess.Popen (command,
+ stdout = subprocess.PIPE,
+ stderr = subprocess.PIPE,
+ shell = True)
+ (output, error) = child.communicate ()
+ code = str (child.wait ())
+ if not child.stdout or child.stdout.close ():
+ print "pipe failed: %(command)s" % locals ()
+ if code != '0':
+ error = code + ' ' + error
+ return (output, error)
+
+revision_re = re.compile ('GIT [Cc]ommittish:\s+([a-f0-9]+)')
+vc_diff_cmd = 'git diff %(color_flag)s %(revision)s HEAD -- %(original)s | cat'
+
+def check_translated_doc (original, translated_file, translated_contents, color=False):
+ m = revision_re.search (translated_contents)
+ if not m:
+ sys.stderr.write ('error: ' + translated_file + \
+ ": no 'GIT committish: <hash>' found.\nPlease check " + \
+ 'the whole file against the original in English, then ' + \
+ 'fill in HEAD committish in the header.\n')
+ sys.exit (1)
+ revision = m.group (1)
+
+ if color:
+ color_flag = '--color'
+ else:
+ color_flag = '--no-color'
+ c = vc_diff_cmd % vars ()
+ if verbose:
+ sys.stderr.write ('running: ' + c)
+ return read_pipe (c)
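+
+# A usage sketch (hypothetical paths): show what changed in the English
+# original since the committish recorded in a translated file.
+#
+#   translated = 'Documentation/fr/user/foo.itely'
+#   contents = open (translated).read ()
+#   (diff, error) = check_translated_doc ('Documentation/user/foo.itely',
+#                                         translated, contents, color=True)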
--- /dev/null
+#!/usr/bin/python
+
+# This module is imported by check_texi_refs.py
+
+references_dict = {
+ 'lilypond': 'ruser',
+ 'lilypond-learning': 'rlearning',
+ 'lilypond-program': 'rprogram',
+ 'lilypond-snippets': 'rlsr',
+ 'music-glossary': 'rglos',
+ 'lilypond-internals': 'rinternals' }
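+
+# For example, references_dict['music-glossary'] is 'rglos', i.e.
+# cross-references to the glossary are written with the @rglos macro.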
--- /dev/null
+#!@PYTHON@
+
+import re
+import os
+
+def new_link_path (link, dir, r):
+ l = link.split ('/')
+ d = dir.split ('/')
+ i = 0
+ while i < len(d) and i < len(l) and l[i] == '..':
+ if r.match (d[i]):
+ del l[i]
+ else:
+ i += 1
+ return '/'.join ([x for x in l if not r.match (x)])
+
+def walk_tree (tree_roots = [],
+ process_dirs = '.*',
+ exclude_dirs = '',
+ find_files = '.*',
+ exclude_files = ''):
+ """Walk directory trees and.returns (dirs, symlinks, files, extra_files) tuple.
+
+ Arguments:
+ tree_roots=DIRLIST use DIRLIST as tree roots list
+ process_dir=PATTERN only process files in directories named PATTERN
+ exclude_dir=PATTERN don't recurse into directories named PATTERN
+ find_files=PATTERN filters files which are hardlinked
+ exclude_files=PATTERN exclude files named PATTERN
+ """
+ find_files_re = re.compile (find_files)
+ exclude_dirs_re = re.compile (exclude_dirs)
+ exclude_files_re = re.compile (exclude_files)
+ process_dirs_re = re.compile (process_dirs)
+
+ dirs_paths = []
+ symlinks_paths = []
+ files_paths = []
+
+ for d in tree_roots:
+ for current_dir, dirs, files in os.walk(d):
+ i = 0
+ while i < len(dirs):
+ if exclude_dirs_re.search (os.path.join (current_dir, dirs[i])):
+ del dirs[i]
+ else:
+ p = os.path.join (current_dir, dirs[i])
+ if os.path.islink (p):
+ symlinks_paths.append (p)
+ i += 1
+ if not process_dirs_re.search (current_dir):
+ continue
+ dirs_paths.append (current_dir)
+ for f in files:
+ if exclude_files_re.match (f):
+ continue
+ p = os.path.join (current_dir, f)
+ if os.path.islink (p):
+ symlinks_paths.append (p)
+ elif find_files_re.match (f):
+ files_paths.append (p)
+ return (dirs_paths, symlinks_paths, files_paths)
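+
+# A usage sketch (hypothetical patterns): collect directories, symlinks
+# and files under Documentation, skipping out-www build directories.
+# Note that the empty-string defaults for exclude_dirs/exclude_files
+# compile to a regexp that matches in any string, so callers should pass
+# explicit patterns (here '^$' matches no file name).
+#
+#   (dirs, symlinks, files) = walk_tree (['Documentation'],
+#                                        exclude_dirs='out-www',
+#                                        find_files=r'.*[.]itely$',
+#                                        exclude_files=r'^$')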
--- /dev/null
+#!@PYTHON@
+
+"""
+Postprocess HTML files:
+add footer, tweak links, add language selection menu.
+"""
+import re
+import os
+import time
+import operator
+
+import langdefs
+
+# Keep the documentation tarball reasonably small by not copying
+# almost-duplicate files; see process_links()
+non_copied_pages = ['Documentation/user/out-www/lilypond-big-page',
+ 'Documentation/user/out-www/lilypond-internals-big-page',
+ 'Documentation/user/out-www/lilypond-learning-big-page',
+ 'Documentation/user/out-www/lilypond-program-big-page',
+ 'Documentation/user/out-www/music-glossary-big-page',
+ 'out-www/examples',
+ 'Documentation/topdocs',
+ 'Documentation/bibliography',
+ 'Documentation/out-www/THANKS',
+ 'Documentation/out-www/DEDICATION',
+ 'Documentation/out-www/devel',
+ 'input/']
+
+def _doc (s):
+ return s
+
+header = r"""
+"""
+
+footer = '''
+<div class="footer">
+<p class="footer_version">
+%(footer_name_version)s
+</p>
+<p class="footer_report">
+%(footer_report_links)s
+</p>
+</div>
+'''
+footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).')
+# ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process
+footer_report_links = _doc ('Your <a href="%(suggest_Docs_url)s">suggestions for the documentation</a> are welcome, please report errors to our <a href="%(mail_address_url)s">bug list</a>.')
+
+
+mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs'
+suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding'
+
+header_tag = '<!-- header_tag -->'
+header_tag_re = re.compile (header_tag)
+
+footer_tag = '<!-- footer_tag -->'
+footer_tag_re = re.compile (footer_tag)
+
+lang_available = _doc ("Other languages: %s.")
+browser_lang = _doc ('About <A HREF="%s">automatic language selection</A>.')
+browser_language_url = "/web/about/browser-language"
+
+LANGUAGES_TEMPLATE = '''
+<p id="languages">
+ %(language_available)s
+ <br/>
+ %(browser_language)s
+</p>
+'''
+
+
+html_re = re.compile ('(.*?)(?:[.]([^/.]*))?[.]html$')
+pages_dict = {}
+
+def build_pages_dict (filelist):
+ """Build dictionary of available translations of each page"""
+ global pages_dict
+ for f in filelist:
+ m = html_re.match (f)
+ if m:
+ g = m.groups()
+ if len (g) <= 1 or g[1] == None:
+ e = ''
+ else:
+ e = g[1]
+ if not g[0] in pages_dict:
+ pages_dict[g[0]] = [e]
+ else:
+ pages_dict[g[0]].append (e)
+
+def source_links_replace (m, source_val):
+ return 'href="' + os.path.join (source_val, m.group (1)) + '"'
+
+splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|\
+Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|\
+lilypond-learning))/')
+
+snippets_ref_re = re.compile (r'href="(\.\./)?lilypond-snippets')
+user_ref_re = re.compile ('href="(?:\.\./)?lilypond\
+(-internals|-learning|-program|(?!-snippets))')
+
+docindex_link_re = re.compile (r'href="index.html"')
+
+
+## Windows does not support symlinks.
+# This function avoids creating symlinks for split HTML manuals
+# Get rid of symlinks in GNUmakefile.in (local-WWW-post)
+# this also fixes missing PNGs only present in translated docs
+def hack_urls (s, prefix):
+ if splitted_docs_re.match (prefix):
+ s = re.sub ('(href|src)="(../lily-.*?|.*?[.]png)"', '\\1="../\\2"', s)
+
+ # fix xrefs between documents in different directories ad hoc
+ if 'user/out-www/lilypond' in prefix:
+ s = snippets_ref_re.sub ('href="source/input/lsr/lilypond-snippets', s)
+ elif 'input/lsr' in prefix:
+ s = user_ref_re.sub ('href="source/Documentation/user/lilypond\\1', s)
+
+ # we also need to replace in the lsr, which is already processed above!
+ if 'input/' in prefix or 'Documentation/topdocs' in prefix:
+ # fix the link from the regtest, lsr and topdoc pages to the doc index
+ # (rewrite prefix to obtain the relative path of the doc index page)
+ rel_link = re.sub (r'out-www/.*$', '', prefix)
+ rel_link = re.sub (r'[^/]*/', '../', rel_link)
+ if 'input/regression' in prefix:
+ indexfile = "Documentation/devel"
+ else:
+ indexfile = "index"
+ s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s)
+
+ source_path = os.path.join (os.path.dirname (prefix), 'source')
+ if not os.path.islink (source_path):
+ return s
+ source_val = os.readlink (source_path)
+ return re.sub ('href="source/(.*?)"', lambda m: source_links_replace (m, source_val), s)
+
+body_tag_re = re.compile ('(?i)<body([^>]*)>')
+html_tag_re = re.compile ('(?i)<html>')
+doctype_re = re.compile ('(?i)<!DOCTYPE')
+doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
+css_re = re.compile ('(?i)<link rel="stylesheet" type="text/css" ([^>]*)href="[^">]*?lilypond.*\.css"([^>]*)>')
+end_head_tag_re = re.compile ('(?i)</head>')
+css_link = """ <link rel="stylesheet" type="text/css" title="Patrick McCarty's design" href="%(rel)sDocumentation/lilypond-mccarty.css">
+ <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond.css" title="Andrew Hawryluk's design">
+ <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond-blue.css" title="Kurt Kroon's blue design">
+ <!--[if lte IE 7]>
+ <link href="%(rel)sDocumentation/lilypond-ie-fixes.css" rel="stylesheet" type="text/css">
+ <![endif]-->
+"""
+
+
+def add_header (s, prefix):
+ """Add header (<body>, doctype and CSS)"""
+ if header_tag_re.search (s) == None:
+ body = '<body\\1>'
+ (s, n) = body_tag_re.subn (body + header, s, 1)
+ if not n:
+ (s, n) = html_tag_re.subn ('<html>' + header, s, 1)
+ if not n:
+ s = header + s
+
+ s = header_tag + '\n' + s
+
+ if doctype_re.search (s) == None:
+ s = doctype + s
+
+ if css_re.search (s) == None:
+ depth = (prefix.count ('/') - 1) * '../'
+ s = end_head_tag_re.sub ((css_link % {'rel': depth}) + '</head>', s)
+ return s
+
+title_tag_re = re.compile ('.*?<title>(.*?)</title>', re.DOTALL)
+AT_web_title_re = re.compile ('@WEB-TITLE@')
+
+def add_title (s):
+ # urg
+ # maybe find first node?
+ fallback_web_title = '-- --'
+ m = title_tag_re.match (s)
+ if m:
+ fallback_web_title = m.group (1)
+ s = AT_web_title_re.sub (fallback_web_title, s)
+ return s
+
+footer_insert_re = re.compile ('<!--\s*FOOTER\s*-->')
+end_body_re = re.compile ('(?i)</body>')
+end_html_re = re.compile ('(?i)</html>')
+
+def add_footer (s, footer_text):
+ """add footer"""
+ (s, n) = footer_insert_re.subn (footer_text + '\n' + '<!-- FOOTER -->', s, 1)
+ if not n:
+ (s, n) = end_body_re.subn (footer_text + '\n' + '</body>', s, 1)
+ if not n:
+ (s, n) = end_html_re.subn (footer_text + '\n' + '</html>', s, 1)
+ if not n:
+ s += footer_text + '\n'
+ return s
+
+def find_translations (prefix, lang_ext):
+ """find available translations of a page"""
+ available = []
+ missing = []
+ for l in langdefs.LANGUAGES:
+ e = l.webext
+ if lang_ext != e:
+ if e in pages_dict[prefix]:
+ available.append (l)
+ elif lang_ext == '' and l.enabled and reduce (operator.and_,
+ [not prefix.startswith (s)
+ for s in non_copied_pages]):
+ # English version of missing translated pages will be written
+ missing.append (e)
+ return available, missing
+
+online_links_re = re.compile ('''(href|src)=['"]\
+((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\
+([.]html)(#[^"']*|)['"]''')
+offline_links_re = re.compile ('href=[\'"]\
+((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
+big_page_name_re = re.compile ('''(.+?)-big-page''')
+
+def process_i18n_big_page_links (match, prefix, lang_ext):
+ big_page_name = big_page_name_re.match (match.group (1))
+ if big_page_name:
+ destination_path = os.path.normpath (os.path.join (os.path.dirname (prefix),
+ big_page_name.group (0)))
+ if not lang_ext in pages_dict[destination_path]:
+ return match.group (0)
+ return 'href="' + match.group (1) + '.' + lang_ext \
+ + match.group (2) + match.group (3) + '"'
+
+def process_links (s, prefix, lang_ext, file_name, missing, target):
+ page_flavors = {}
+ if target == 'online':
+ # Strip .html, suffix for auto language selection (content
+ # negotiation). The menu must keep the full extension, so do
+ # this before adding the menu.
+ page_flavors[file_name] = \
+ [lang_ext, online_links_re.sub ('\\1="\\2\\4"', s)]
+ elif target == 'offline':
+ # in LANG doc index: don't rewrite .html suffixes
+ # as not all .LANG.html pages exist;
+ # the doc index should be translated and contain links with the right suffixes
+ if prefix == 'Documentation/out-www/index':
+ page_flavors[file_name] = [lang_ext, s]
+ elif lang_ext == '':
+ page_flavors[file_name] = [lang_ext, s]
+ for e in missing:
+ page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = \
+ [e, offline_links_re.sub ('href="\\1.' + e + '\\2\\3"', s)]
+ else:
+ # For saving bandwidth and disk space, we don't duplicate big pages
+ # in English, so we must process translated big pages links differently.
+ if 'big-page' in prefix:
+ page_flavors[file_name] = \
+ [lang_ext,
+ offline_links_re.sub \
+ (lambda match: process_i18n_big_page_links (match, prefix, lang_ext),
+ s)]
+ else:
+ page_flavors[file_name] = \
+ [lang_ext,
+ offline_links_re.sub ('href="\\1.' + lang_ext + '\\2\\3"', s)]
+ return page_flavors
+
+def add_menu (page_flavors, prefix, available, target, translation):
+ for k in page_flavors:
+ language_menu = ''
+ languages = ''
+ if page_flavors[k][0] != '':
+ t = translation[page_flavors[k][0]]
+ else:
+ t = _doc
+ for lang in available:
+ lang_file = lang.file_name (os.path.basename (prefix), '.html')
+ if language_menu != '':
+ language_menu += ', '
+ language_menu += '<a href="%s">%s</a>' % (lang_file, t (lang.name))
+ if target == 'offline':
+ browser_language = ''
+ elif target == 'online':
+ browser_language = t (browser_lang) % browser_language_url
+ if language_menu:
+ language_available = t (lang_available) % language_menu
+ languages = LANGUAGES_TEMPLATE % vars ()
+ page_flavors[k][1] = add_footer (page_flavors[k][1], languages)
+ return page_flavors
+
+
+def process_html_files (package_name = '',
+ package_version = '',
+ target = 'offline',
+ name_filter = lambda s: s):
+ """Add header, footer and tweak links to a number of HTML files
+
+ Arguments:
+ package_name=NAME set package_name to NAME
+ package_version=VERSION set package version to VERSION
+ targets=offline|online set page processing depending on the target
+ offline is for reading HTML pages locally
+ online is for hosting the HTML pages on a website with content
+ negotiation
+ name_filter a HTML file name filter
+ """
+ translation = langdefs.translation
+ localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
+
+ if "http://" in mail_address:
+ mail_address_url = mail_address
+ else:
+ mail_address_url= 'mailto:' + mail_address
+
+ versiontup = package_version.split ('.')
+ branch_str = _doc ('stable-branch')
+ if int (versiontup[1]) % 2:
+ branch_str = _doc ('development-branch')
+
+ # Initialize dictionaries for string formatting
+ subst = {}
+ subst[''] = dict ([i for i in globals ().items() if type (i[1]) is str])
+ subst[''].update (dict ([i for i in locals ().items() if type (i[1]) is str]))
+ for l in translation:
+ e = langdefs.LANGDICT[l].webext
+ if e:
+ subst[e] = {}
+ for name in subst['']:
+ subst[e][name] = translation[l] (subst[''][name])
+ # Do deeper string formatting as early as possible,
+ # so only one '%' formatting pass is needed later
+ for e in subst:
+ subst[e]['footer_name_version'] = subst[e]['footer_name_version'] % subst[e]
+ subst[e]['footer_report_links'] = subst[e]['footer_report_links'] % subst[e]
+
+ for prefix, ext_list in pages_dict.items ():
+ for lang_ext in ext_list:
+ file_name = langdefs.lang_file_name (prefix, lang_ext, '.html')
+ in_f = open (file_name)
+ s = in_f.read()
+ in_f.close()
+
+ s = s.replace ('%', '%%')
+ s = hack_urls (s, prefix)
+ s = add_header (s, prefix)
+
+ ### add footer
+ if footer_tag_re.search (s) == None:
+ s = add_footer (s, footer_tag + footer)
+
+ available, missing = find_translations (prefix, lang_ext)
+ page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target)
+ # Add menu after stripping: must not have autoselection for language menu.
+ page_flavors = add_menu (page_flavors, prefix, available, target, translation)
+ for k in page_flavors:
+ page_flavors[k][1] = page_flavors[k][1] % subst[page_flavors[k][0]]
+ out_f = open (name_filter (k), 'w')
+ out_f.write (page_flavors[k][1])
+ out_f.close()
+ # if the page is translated, a .en.html symlink is necessary for content negotiation
+ if target == 'online' and ext_list != ['']:
+ os.symlink (os.path.basename (prefix) + '.html', name_filter (prefix + '.en.html'))
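+
+# A usage sketch (hypothetical values): register the available pages,
+# then add headers, footers and the language menu for offline reading.
+#
+#   build_pages_dict (['Documentation/user/out-www/lilypond.html',
+#                      'Documentation/user/out-www/lilypond.fr.html'])
+#   process_html_files (package_name='lilypond',
+#                       package_version='2.12.0',
+#                       target='offline')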
depth = ..
-SUBDIRS=aux build
+SUBDIRS=auxiliar build
SEXECUTABLES=convert-ly lilypond-book abc2ly etf2ly midi2ly lilypond-invoke-editor musicxml2ly lilysong lilymidi
+++ /dev/null
-depth=../..
-
-EXTRA_DIST_FILES = $(call src-wildcard,*.sh) $(call src-wildcard,*.py)
-EXTRA_DIST_FILES += pfx2ttf.fontforge
-
-include $(depth)/make/stepmake.make
-
-default:
+++ /dev/null
-#!/bin/sh
-
-if test "$1" == "--fresh"; then
- fresh=yes
-fi
-
-if test ! -f config-cov.make; then
- fresh=yes
-fi
-
-if test "$fresh" = "yes";
-then
- ./configure --enable-config=cov --disable-optimising \
- && make conf=cov -j2 clean \
- && perl -i~ -pe 's/-pipe /-fprofile-arcs -ftest-coverage -pipe /g' config-cov.make \
- && perl -i~ -pe 's/ -ldl / -lgcov -ldl /g' config-cov.make
-else
- find -name '*.gcda' -exec rm '{}' ';'
-fi
-
-mkdir -p scripts/out-cov/
-touch scripts/out-cov/midi2ly scripts/out-cov/midi2ly.1
-make conf=cov -j2 && \
- make conf=cov test-clean OUT_TEST=testcov LILYPOND_JOBS= && \
- make conf=cov test OUT_TEST=testcov LILYPOND_JOBS='-dtrace-scheme-coverage '
-
-if test "$?" != "0"; then
- tail -100 out-cov/test-run.log
- exit 1
-fi
-
-depth=../..
-resultdir=out/coverage-results
-
-rm -rf $resultdir
-mkdir $resultdir
-cd $resultdir
-
-ln $depth/lily/* .
-ln $depth/scm/*.scm .
-mv $depth/input/regression/out-testcov/*.scm.cov .
-ln $depth/ly/*.ly .
-ln $depth/lily/out-cov/*[ch] .
-mkdir include
-ln $depth/lily/include/* include/
-ln $depth/flower/include/* include/
-for a in *[cl] *.yy
-do
- gcov -o $depth/lily/out-cov/ -p $a > $a.gcov-summary
-done
-
-$depth/scripts/aux/coverage.py --uncovered *.cc > uncovered.txt
-$depth/scripts/aux/coverage.py --hotspots *.cc > hotspots.txt
-$depth/scripts/aux/coverage.py --summary *.cc > summary.txt
-$depth/scripts/aux/coverage.py --uncovered *.scm > uncovered-scheme.txt
-
-head -20 summary.txt
-
-cat <<EOF
-results in
-
- out/coverage-results/summary.txt
- out/coverage-results/uncovered.txt
- out/coverage-results/uncovered-scheme.txt
- out/coverage-results/hotspots.txt
-
-EOF
+++ /dev/null
-#!/bin/sh
-
-if test "$1" == "--fresh"; then
- fresh=yes
-fi
-
-if test ! -f config-prof.make; then
- fresh=yes
-fi
-
-if test "$fresh" = "yes";
-then
- ./configure --enable-config=prof --enable-optimising \
- && perl -i~ -pe 's/-pipe /-pg -pipe /g' config-prof.make \
- && perl -i~ -pe 's/ -ldl / -pg -ldl /g' config-prof.make
-fi
-
-make conf=prof -j2
-
-if test "$?" != "0"; then
- exit 2
-fi
-
-depth=../..
-resultdir=out/profile-results
-
-rm -rf $resultdir
-mkdir $resultdir
-cd $resultdir
-
-
-cat > long-score.ly << EOF
-\version "2.10.0"
-foo = \new Staff \new Voice \repeat unfold 50 \relative { c4 d8[ d16( e]~ e16[ e e) f] g8 }
-\score {
- \new ChoirStaff <<
- \foo \foo \foo \foo
- \foo \foo \foo \foo
-
- >>
- \midi {}
- \layout {}
-}
-EOF
-
-rm gmon.sum
-
-exe=$depth/out-prof/bin/lilypond
-
-## todo: figure out representative sample.
-files="wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 mozart-hrn-3 mozart-hrn-3 long-score"
-
-
-
-$exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
- -I $depth/input/mutopia/W.A.Mozart/ \
- $files
-
-
-for a in *.profile; do
- echo $a
- cat $a
-done
-
-echo 'running gprof'
-gprof $exe > profile
-
-exit 0
-
-
-## gprof -s takes forever.
-for a in `seq 1 3`; do
- for f in $files ; do
- $exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
- -I $depth/input/mutopia/W.A.Mozart/ \
- $f
-
- echo 'running gprof'
- if test -f gmon.sum ; then
- gprof -s $exe gmon.out gmon.sum
- else
- mv gmon.out gmon.sum
- fi
- done
-done
-
-gprof $exe gmon.sum > profile
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-check_texi_refs.py
-Interactive Texinfo cross-references checking and fixing tool
-
-"""
-
-
-import sys
-import re
-import os
-import optparse
-import imp
-
-outdir = 'out-www'
-
-log = sys.stderr
-stdout = sys.stdout
-
-file_not_found = 'file not found in include path'
-
-warn_not_fixed = '*** Warning: this broken x-ref has not been fixed!\n'
-
-opt_parser = optparse.OptionParser (usage='check_texi_refs.py [OPTION]... FILE',
- description='''Check and fix \
-cross-references in a collection of Texinfo
-documents that heavily cross-reference each other.
-''')
-
-opt_parser.add_option ('-a', '--auto-fix',
- help="Automatically fix cross-references whenever \
-it is possible",
- action='store_true',
- dest='auto_fix',
- default=False)
-
-opt_parser.add_option ('-b', '--batch',
- help="Do not run interactively",
- action='store_false',
- dest='interactive',
- default=True)
-
-opt_parser.add_option ('-c', '--check-comments',
- help="Also check commented out x-refs",
- action='store_true',
- dest='check_comments',
- default=False)
-
-opt_parser.add_option ('-p', '--check-punctuation',
- help="Check punctuation after x-refs",
- action='store_true',
- dest='check_punctuation',
- default=False)
-
-opt_parser.add_option ("-I", '--include', help="add DIR to include path",
- metavar="DIR",
- action='append', dest='include_path',
- default=[os.path.abspath (os.getcwd ())])
-
-(options, files) = opt_parser.parse_args ()
-
-class InteractionError (Exception):
- pass
-
-
-manuals_defs = imp.load_source ('manuals_defs', files[0])
-manuals = {}
-
-def find_file (name, prior_directory='.'):
- p = os.path.join (prior_directory, name)
- out_p = os.path.join (prior_directory, outdir, name)
- if os.path.isfile (p):
- return p
- elif os.path.isfile (out_p):
- return out_p
-
- # looking for file in include_path
- for d in options.include_path:
- p = os.path.join (d, name)
- if os.path.isfile (p):
- return p
-
- # file not found in include_path: looking in `outdir' subdirs
- for d in options.include_path:
- p = os.path.join (d, outdir, name)
- if os.path.isfile (p):
- return p
-
- raise EnvironmentError (1, file_not_found, name)
-
-
-exit_code = 0
-
-def set_exit_code (n):
- global exit_code
- exit_code = max (exit_code, n)
-
-
-if options.interactive:
- try:
- import readline
- except:
- pass
-
- def yes_prompt (question, default=False, retries=3):
- d = {True: 'y', False: 'n'}.get (default, False)
- while retries:
- a = raw_input ('%s [default: %s]' % (question, d) + '\n')
- if a.lower ().startswith ('y'):
- return True
- if a.lower ().startswith ('n'):
- return False
- if a == '' or retries < 0:
- return default
- stdout.write ("Please answer yes or no.\n")
- retries -= 1
-
- def search_prompt ():
- """Prompt user for a substring to look for in node names.
-
-If user input is empty or matches no node name, return None,
-otherwise return a list of (manual, node name, file) tuples.
-
-"""
- substring = raw_input ("Enter a substring to search in node names \
-(press Enter to skip this x-ref):\n")
- if not substring:
- return None
- substring = substring.lower ()
- matches = []
- for k in manuals:
- matches += [(k, node, manuals[k]['nodes'][node][0])
- for node in manuals[k]['nodes']
- if substring in node.lower ()]
- return matches
-
-else:
- def yes_prompt (question, default=False, retries=3):
- return default
-
- def search_prompt ():
- return None
-
-
-ref_re = re.compile \
- ('@(ref|ruser|rlearning|rprogram|rglos)(?:\\{(?P<ref>[^,\\\\\\}]+?)|\
-named\\{(?P<refname>[^,\\\\]+?),(?P<display>[^,\\\\\\}]+?))\\}(?P<last>.)',
- re.DOTALL)
-node_include_re = re.compile (r'(?m)^@(node|include)\s+(.+?)$')
-
-whitespace_re = re.compile (r'\s+')
-line_start_re = re.compile ('(?m)^')
-
-def which_line (index, newline_indices):
- """Calculate line number of a given string index
-
-Return line number of string index index, where
-newline_indices is an ordered iterable of all newline indices.
-"""
- inf = 0
- sup = len (newline_indices) - 1
- n = len (newline_indices)
- while inf + 1 != sup:
- m = (inf + sup) / 2
- if index >= newline_indices [m]:
- inf = m
- else:
- sup = m
- return inf + 1
-
-
-comments_re = re.compile ('(?<!@)(@c(?:omment)? \
-.*?\\n|^@ignore\\n.*?\\n@end ignore\\n)', re.M | re.S)
-
-def calc_comments_boundaries (texinfo_doc):
- return [(m.start (), m.end ()) for m in comments_re.finditer (texinfo_doc)]
-
-
-def is_commented_out (start, end, comments_boundaries):
- for k in range (len (comments_boundaries)):
- if (start > comments_boundaries[k][0]
- and end <= comments_boundaries[k][1]):
- return True
- elif end <= comments_boundaries[k][0]:
- return False
- return False
-
-
-def read_file (f, d):
- s = open (f).read ()
- base = os.path.basename (f)
- dir = os.path.dirname (f)
-
- d['contents'][f] = s
-
- d['newline_indices'][f] = [m.end () for m in line_start_re.finditer (s)]
- if options.check_comments:
- d['comments_boundaries'][f] = []
- else:
- d['comments_boundaries'][f] = calc_comments_boundaries (s)
-
- for m in node_include_re.finditer (s):
- if m.group (1) == 'node':
- line = which_line (m.start (), d['newline_indices'][f])
- d['nodes'][m.group (2)] = (f, line)
-
- elif m.group (1) == 'include':
- try:
- p = find_file (m.group (2), dir)
- except EnvironmentError, (errno, strerror):
- if strerror == file_not_found:
- continue
- else:
- raise
- read_file (p, d)
-
-
-def read_manual (name):
- """Look for all node names and cross-references in a Texinfo document
-
-Return a (manual, dictionary) tuple where manual is the cross-reference
-macro name defined by references_dict[name], and dictionary
-has the following keys:
-
- 'nodes' is a dictionary of `node name':(file name, line number),
-
- 'contents' is a dictionary of file:`full file contents',
-
- 'newline_indices' is a dictionary of
-file:[list of beginning-of-line string indices],
-
- 'comments_boundaries' is a list of (start, end) tuples,
-which contain string indices of start and end of each comment.
-
-Included files that can be found in the include path are processed too.
-
-"""
- d = {}
- d['nodes'] = {}
- d['contents'] = {}
- d['newline_indices'] = {}
- d['comments_boundaries'] = {}
- manual = manuals_defs.references_dict.get (name, '')
- try:
- f = find_file (name + '.tely')
- except EnvironmentError, (errno, strerror):
- if not strerror == file_not_found:
- raise
- else:
- try:
- f = find_file (name + '.texi')
- except EnvironmentError, (errno, strerror):
- if strerror == file_not_found:
- sys.stderr.write (name + '.{texi,tely}: ' +
- file_not_found + '\n')
- return (manual, d)
- else:
- raise
-
- log.write ("Processing manual %s (%s)\n" % (f, manual))
- read_file (f, d)
- return (manual, d)
-
-
-log.write ("Reading files...\n")
-
-manuals = dict ([read_manual (name)
- for name in manuals_defs.references_dict.keys ()])
-
-ref_fixes = set ()
-bad_refs_count = 0
-fixes_count = 0
-
-def add_fix (old_type, old_ref, new_type, new_ref):
- ref_fixes.add ((old_type, old_ref, new_type, new_ref))
-
-
-def lookup_fix (r):
- found = []
- for (old_type, old_ref, new_type, new_ref) in ref_fixes:
- if r == old_ref:
- found.append ((new_type, new_ref))
- return found
-
-
-def preserve_linebreak (text, linebroken):
- if linebroken:
- if ' ' in text:
- text = text.replace (' ', '\n', 1)
- n = ''
- else:
- n = '\n'
- else:
- n = ''
- return (text, n)
-
-
-def choose_in_numbered_list (message, string_list, sep=' ', retries=3):
- S = set (string_list)
- S.discard ('')
- string_list = list (S)
- numbered_list = sep.join ([str (j + 1) + '. ' + string_list[j]
- for j in range (len (string_list))]) + '\n'
- t = retries
- while t > 0:
- value = ''
- stdout.write (message +
- "(press Enter to discard and start a new search)\n")
- input = raw_input (numbered_list)
- if not input:
- return ''
- try:
- value = string_list[int (input) - 1]
- except IndexError:
- stdout.write ("Error: index number out of range\n")
- except ValueError:
- matches = [input in v for v in string_list]
- n = matches.count (True)
- if n == 0:
- stdout.write ("Error: input matches no item in the list\n")
- elif n > 1:
- stdout.write ("Error: ambiguous input (matches several items \
-in the list)\n")
- else:
- value = string_list[matches.index (True)]
- if value:
- return value
- t -= 1
- raise InteractionError ("%d retries limit exceeded" % retries)
-
-refs_count = 0
-
-def check_ref (manual, file, m):
- global fixes_count, bad_refs_count, refs_count
- refs_count += 1
- bad_ref = False
- fixed = True
- type = m.group (1)
- original_name = m.group ('ref') or m.group ('refname')
- name = whitespace_re.sub (' ', original_name). strip ()
- newline_indices = manuals[manual]['newline_indices'][file]
- line = which_line (m.start (), newline_indices)
- linebroken = '\n' in original_name
- original_display_name = m.group ('display')
- next_char = m.group ('last')
- if original_display_name: # the xref has an explicit display name
- display_linebroken = '\n' in original_display_name
- display_name = whitespace_re.sub (' ', original_display_name). strip ()
- commented_out = is_commented_out \
- (m.start (), m.end (), manuals[manual]['comments_boundaries'][file])
- useful_fix = not outdir in file
-
-    # check punctuation after the x-ref
- if options.check_punctuation and not next_char in '.,;:!?':
- stdout.write ("Warning: %s: %d: `%s': x-ref \
-not followed by punctuation\n" % (file, line, name))
-
- # validate xref
- explicit_type = type
- new_name = name
-
- if type != 'ref' and type == manual and not commented_out:
- if useful_fix:
- fixed = False
- bad_ref = True
- stdout.write ("\n%s: %d: `%s': external %s x-ref should be internal\n"
- % (file, line, name, type))
- if options.auto_fix or yes_prompt ("Fix this?"):
- type = 'ref'
-
- if type == 'ref':
- explicit_type = manual
-
- if not name in manuals[explicit_type]['nodes'] and not commented_out:
- bad_ref = True
- fixed = False
- stdout.write ('\n')
- if type == 'ref':
- stdout.write ("\e[1;31m%s: %d: `%s': wrong internal x-ref\e[0m\n"
- % (file, line, name))
- else:
- stdout.write ("\e[1;31m%s: %d: `%s': wrong external `%s' x-ref\e[0m\n"
- % (file, line, name, type))
- # print context
- stdout.write ('--\n' + manuals[manual]['contents'][file]
- [newline_indices[max (0, line - 2)]:
- newline_indices[min (line + 3,
- len (newline_indices) - 1)]] +
- '--\n')
-
- # try to find the reference in other manuals
- found = []
- for k in [k for k in manuals if k != explicit_type]:
- if name in manuals[k]['nodes']:
- if k == manual:
- found = ['ref']
- stdout.write ("\e[1;32m found as internal x-ref\e[0m\n")
- break
- else:
- found.append (k)
- stdout.write ("\e[1;32m found as `%s' x-ref\e[0m\n" % k)
-
- if (len (found) == 1
- and (options.auto_fix or yes_prompt ("Fix this x-ref?"))):
- add_fix (type, name, found[0], name)
- type = found[0]
- fixed = True
-
- elif len (found) > 1 and useful_fix:
- if options.interactive or options.auto_fix:
- stdout.write ("* Several manuals contain this node name, \
-cannot determine manual automatically.\n")
- if options.interactive:
- t = choose_in_numbered_list ("Choose manual for this x-ref by \
-index number or beginning of name:\n", found)
- if t:
- add_fix (type, name, t, name)
- type = t
- fixed = True
-
- if not fixed:
- # try to find a fix already made
- found = lookup_fix (name)
-
- if len (found) == 1:
- stdout.write ("Found one previous fix: %s `%s'\n" % found[0])
- if options.auto_fix or yes_prompt ("Apply this fix?"):
- type, new_name = found[0]
- fixed = True
-
- elif len (found) > 1:
- if options.interactive or options.auto_fix:
- stdout.write ("* Several previous fixes match \
-this node name, cannot fix automatically.\n")
- if options.interactive:
-                    concatenated = choose_in_numbered_list ("Choose new manual \
-and x-ref by index number or beginning of name:\n", [''.join ([i[0], ' ', i[1]])
- for i in found],
- sep='\n')
-                    if concatenated:
- type, new_name = concatenated.split (' ', 1)
- fixed = True
-
- if not fixed:
- # all previous automatic fixing attempts failed,
- # ask user for substring to look in node names
- while True:
- node_list = search_prompt ()
- if node_list == None:
- if options.interactive:
- stdout.write (warn_not_fixed)
- break
- elif not node_list:
- stdout.write ("No matched node names.\n")
- else:
- concatenated = choose_in_numbered_list ("Choose \
-node name and manual for this x-ref by index number or beginning of name:\n", \
- [' '.join ([i[0], i[1], '(in %s)' % i[2]])
- for i in node_list],
- sep='\n')
- if concatenated:
- t, z = concatenated.split (' ', 1)
- new_name = z.split (' (in ', 1)[0]
- add_fix (type, name, t, new_name)
- type = t
- fixed = True
- break
-
- if fixed and type == manual:
- type = 'ref'
- bad_refs_count += int (bad_ref)
- if bad_ref and not useful_fix:
- stdout.write ("*** Warning: this file is automatically generated, \
-please fix the code source instead of generated documentation.\n")
-
- # compute returned string
- if new_name == name:
- if bad_ref and (options.interactive or options.auto_fix):
- # only the type of the ref was fixed
- fixes_count += int (fixed)
- if original_display_name:
- return ('@%snamed{%s,%s}' % (type, original_name, original_display_name)) + next_char
- else:
- return ('@%s{%s}' % (type, original_name)) + next_char
- else:
- fixes_count += int (fixed)
- (ref, n) = preserve_linebreak (new_name, linebroken)
- if original_display_name:
- if bad_ref:
- stdout.write ("Current display name is `%s'\n")
- display_name = raw_input \
- ("Enter a new display name or press enter to keep the existing name:\n") \
- or display_name
- (display_name, n) = preserve_linebreak (display_name, display_linebroken)
- else:
- display_name = original_display_name
- return ('@%snamed{%s,%s}' % (type, ref, display_name)) + \
- next_char + n
- else:
- return ('@%s{%s}' % (type, ref)) + next_char + n
-
-
-log.write ("Checking cross-references...\n")
-
-try:
- for key in manuals:
- for file in manuals[key]['contents']:
- s = ref_re.sub (lambda m: check_ref (key, file, m),
- manuals[key]['contents'][file])
- if s != manuals[key]['contents'][file]:
- open (file, 'w').write (s)
-except KeyboardInterrupt:
- log.write ("Operation interrupted, exiting.\n")
- sys.exit (2)
-except InteractionError, instance:
- log.write ("Operation refused by user: %s\nExiting.\n" % instance)
- sys.exit (3)
-
-log.write ("\e[1;36mDone: %d x-refs found, %d bad x-refs found, fixed %d.\e[0m\n" %
- (refs_count, bad_refs_count, fixes_count))
+++ /dev/null
-#!/usr/bin/env python
-
-import __main__
-import optparse
-import os
-import sys
-
-import langdefs
-import buildlib
-
-verbose = 0
-use_colors = False
-lang = 'C'
-C = lang
-
-def dir_lang (file, lang, lang_dir_index):
- path_components = file.split ('/')
- path_components[lang_dir_index] = lang
- return os.path.join (*path_components)
-
-def do_file (file_name, lang_codes, buildlib):
- if verbose:
- sys.stderr.write ('%s...\n' % file_name)
- split_file_name = file_name.split ('/')
- d1, d2 = split_file_name[0:2]
- if d1 in lang_codes:
- check_lang = d1
- lang_dir_index = 0
- elif d2 in lang_codes:
- check_lang = d2
- lang_dir_index = 1
- else:
- check_lang = lang
- if check_lang == C:
- raise Exception ('cannot determine language for ' + file_name)
-
- original = dir_lang (file_name, '', lang_dir_index)
- translated_contents = open (file_name).read ()
- (diff_string, error) \
- = buildlib.check_translated_doc (original,
- file_name,
- translated_contents,
- color=use_colors and not update_mode)
-
- if error:
- sys.stderr.write ('warning: %s: %s' % (file_name, error))
-
- if update_mode:
- if error or len (diff_string) >= os.path.getsize (original):
- buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + original)
- elif diff_string:
- diff_file = original + '.diff'
- f = open (diff_file, 'w')
- f.write (diff_string)
- f.close ()
- buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + diff_file)
- os.remove (diff_file)
- else:
- sys.stdout.write (diff_string)
-
-def usage ():
- sys.stdout.write (r'''
-Usage:
-check-translation [--language=LANG] [--verbose] [--update] FILE...
-
-This script is licensed under the GNU GPL.
-''')
-
-def do_options ():
- global lang, verbose, update_mode, use_colors
-
- p = optparse.OptionParser (usage="check-translation [--language=LANG] [--verbose] FILE...",
- description="This script is licensed under the GNU GPL.")
- p.add_option ("--language",
- action='store',
- default='site',
- dest="language")
- p.add_option ("--no-color",
- action='store_false',
- default=True,
- dest="color",
-                  help="do not print ANSI-coloured output")
- p.add_option ("--verbose",
- action='store_true',
- default=False,
- dest="verbose",
- help="print details, including executed shell commands")
- p.add_option ('-u', "--update",
- action='store_true',
- default=False,
- dest='update_mode',
- help='call $EDITOR to update the translation')
-
- (options, files) = p.parse_args ()
- verbose = options.verbose
- lang = options.language
- use_colors = options.color
- update_mode = options.update_mode
-
- return files
-
-def main ():
- global update_mode, text_editor
-
- files = do_options ()
- if 'EDITOR' in os.environ:
- text_editor = os.environ['EDITOR']
- else:
- update_mode = False
-
- buildlib.verbose = verbose
-
- for i in files:
- do_file (i, langdefs.LANGDICT.keys (), buildlib)
-
-if __name__ == '__main__':
- main ()
+++ /dev/null
-#!/usr/bin/env python
-
-import os
-import glob
-import re
-import sys
-import optparse
-
-#File 'accidental-engraver.cc'
-#Lines executed:87.70% of 252
-
-def summary (args):
- results = []
- for f in args:
- str = open (f).read ()
- m = re.search ("File '([^']+.cc)'\s*Lines executed:([0-9.]+)% of ([0-9]+)", str)
-
- if m and '/usr/lib' in m.group (1):
- continue
-
- if m:
- cov = float (m.group (2))
- lines = int (m.group (3))
- pain = lines * (100.0 - cov)
- file = m.group (1)
- tup = (pain, locals ().copy())
-
- results.append(tup)
-
- results.sort ()
- results.reverse()
-
- print 'files sorted by number of untested lines (decreasing)'
- print
- print '%5s (%6s): %s' % ('cov %', 'lines', 'file')
- print '----------------------------------------------'
-
- for (pain, d) in results:
- print '%(cov)5.2f (%(lines)6d): %(file)s' % d
-
-class Chunk:
- def __init__ (self, range, coverage_count, all_lines, file):
- assert coverage_count >= 0
- assert type (range) == type (())
-
- self.coverage_count = coverage_count
- self.range = range
- self.all_lines = all_lines
- self.file = file
-
- def length (self):
- return self.range[1] - self.range[0]
-
- def text (self):
- return ''.join ([l[2] for l in self.lines()])
-
- def lines (self):
- return self.all_lines[self.range[0]:
- self.range[1]]
- def widen (self):
-        self.range = (max (self.range[0] - 1, 0),
-                      self.range[1] + 1)
- def write (self):
- print 'chunk in', self.file
- for (c, n, l) in self.lines ():
- cov = '%d' % c
- if c == 0:
- cov = '#######'
- elif c < 0:
- cov = ''
- sys.stdout.write ('%8s:%8d:%s' % (cov, n, l))
-
- def uncovered_score (self):
- return self.length ()
-
-class SchemeChunk (Chunk):
- def uncovered_score (self):
- text = self.text ()
- if (text.startswith ('(define ')
- and not text.startswith ('(define (')):
- return 0
-
- if text.startswith ('(use-modules '):
- return 0
-
- if (text.startswith ('(define-public ')
- and not text.startswith ('(define-public (')):
- return 0
-
- return len ([l for (c,n,l) in self.lines() if (c == 0)])
-
-def read_gcov (f):
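-    # a gcov line typically looks like '        7:   23:  foo ();':
-    # execution count, line number, then source text; a count field of
-    # '#####' means the line was never executed, '-' means it contains
-    # no executable code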
- ls = []
-
- in_lines = [l for l in open (f).readlines ()]
- (count_len, line_num_len) = tuple (map (len, in_lines[0].split (':')[:2]))
-
- for l in in_lines:
- c = l[:count_len].strip ()
- l = l[count_len+1:]
- n = int (l[:line_num_len].strip ())
-
- if n == 0:
- continue
-
- if '#' in c:
- c = 0
- elif c == '-':
- c = -1
- else:
- c = int (c)
-
- l = l[line_num_len+1:]
-
- ls.append ((c,n,l))
-
- return ls
-
-def get_c_chunks (ls, file):
- chunks = []
- chunk = []
-
- last_c = -1
- for (c, n, l) in ls:
- if not (c == last_c or c < 0 and l != '}\n'):
- if chunk and last_c >= 0:
- nums = [n-1 for (n, l) in chunk]
- chunks.append (Chunk ((min (nums), max (nums)+1),
- last_c, ls, file))
- chunk = []
-
- chunk.append ((n,l))
- if c >= 0:
- last_c = c
-
- return chunks
-
-def get_scm_chunks (ls, file):
- chunks = []
- chunk = []
-
- def new_chunk ():
- if chunk:
- nums = [n-1 for (n, l) in chunk]
- chunks.append (SchemeChunk ((min (nums), max (nums)+1),
- max (last_c, 0), ls, file))
- chunk[:] = []
-
- last_c = -1
- for (cov_count, line_number, line) in ls:
- if line.startswith ('('):
- new_chunk ()
- last_c = -1
-
- chunk.append ((line_number, line))
- if cov_count >= 0:
- last_c = cov_count
-
- return chunks
-
-def widen_chunk (ch, ls):
-    (a, b) = ch.range
-    a -= 1
-    b += 1
-
-    return [(n, l) for (c, n, l) in ls[a:b]]
-
-
-def extract_chunks (file):
- try:
- ls = read_gcov (file)
- except IOError, s :
- print s
- return []
-
- cs = []
- if 'scm' in file:
- cs = get_scm_chunks (ls, file)
- else:
- cs = get_c_chunks (ls, file)
- return cs
-
-
-def filter_uncovered (chunks):
- def interesting (c):
- if c.coverage_count > 0:
- return False
-
- t = c.text()
- for stat in ('warning', 'error', 'print', 'scm_gc_mark'):
- if stat in t:
- return False
- return True
-
- return [c for c in chunks if interesting (c)]
-
-
-def main ():
- p = optparse.OptionParser (usage="usage coverage.py [options] files",
- description="")
- p.add_option ("--summary",
- action='store_true',
- default=False,
- dest="summary")
-
- p.add_option ("--hotspots",
- default=False,
- action='store_true',
- dest="hotspots")
-
- p.add_option ("--uncovered",
- default=False,
- action='store_true',
- dest="uncovered")
-
-
- (options, args) = p.parse_args ()
-
-
- if options.summary:
- summary (['%s.gcov-summary' % s for s in args])
-
- if options.uncovered or options.hotspots:
- chunks = []
- for a in args:
- name = a
- if name.endswith ('scm'):
- name += '.cov'
- else:
- name += '.gcov'
-
- chunks += extract_chunks (name)
-
- if options.uncovered:
- chunks = filter_uncovered (chunks)
- chunks = [(c.uncovered_score (), c) for c in chunks if c.uncovered_score() > 0]
- elif options.hotspots:
- chunks = [((c.coverage_count, -c.length()), c) for c in chunks]
-
-
- chunks.sort ()
- chunks.reverse ()
- for (score, c) in chunks:
- c.write ()
-
-
-
-if __name__ == '__main__':
- main ()
+++ /dev/null
-#!/usr/bin/env python
-import sys
-import re
-import os
-
-
-full_paths = {}
-incs = {}
-inc_re = re.compile ('^#include "([^"]+)"')
-def parse_file (fn):
- lst = []
-
- lc = 0
- for l in open (fn).readlines():
- lc += 1
- m = inc_re.search (l)
- if m:
- lst.append ((lc, m.group (1)))
-
- base = os.path.split (fn)[1]
- full_paths[base] = fn
- incs[base] = lst
-
-
-def has_include (f, name):
- try:
- return name in [b for (a,b) in incs[f]]
- except KeyError:
- return False
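-
-# e.g. if foo.cc includes both a.hh and b.hh, and b.hh itself includes
-# a.hh, the direct include of a.hh in foo.cc is reported as redundant
-# (illustration)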
-
-for a in sys.argv[1:]:
- parse_file (a)
-
-print '-*-compilation-*-'
-for (f, lst) in incs.items ():
- for (n, inc) in lst:
- for (n2, inc2) in lst:
- if has_include (inc2, inc):
- print "%s:%d: already have %s from %s" % (full_paths[f], n,
- inc, inc2)
- break
-
-
-
+++ /dev/null
-#!/usr/bin/env python
-
-# fixcc -- nitpick lily's c++ code
-
-# TODO
-# * maintainable rules: regexps using whitespace (?x) and match names
-#   (?P<identifier>)
-# * trailing `*' vs. function definition
-# * do not break/change indentation of fixcc-clean files
-# * check lexer, parser
-# * rewrite in elisp, add to cc-mode
-# * using regexes is broken by design
-# * ?
-# * profit
-
-import __main__
-import getopt
-import os
-import re
-import string
-import sys
-import time
-
-COMMENT = 'COMMENT'
-STRING = 'STRING'
-GLOBAL_CXX = 'GC++'
-CXX = 'C++'
-verbose_p = 0
-indent_p = 0
-
-rules = {
- GLOBAL_CXX:
- [
- # delete gratuitous block
- ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
- '\n\\2;'),
- ],
- CXX:
- [
- # space before parenthesis open
- ('([^\( \]])[ \t]*\(', '\\1 ('),
- # space after comma
- ("([^'],)[ \t]*", '\\1 '),
- # delete gratuitous block
- ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
- '\n\\2;'),
- # delete inline tabs
- ('(\w)\t+', '\\1 '),
- # delete inline double spaces
- (' *', ' '),
- # delete space after parenthesis open
- ('\([ \t]*', '('),
- # delete space before parenthesis close
- ('[ \t]*\)', ')'),
- # delete spaces after prefix
- ('(--|\+\+)[ \t]*([\w\)])', '\\1\\2'),
- # delete spaces before postfix
- ('([\w\)\]])[ \t]*(--|\+\+)', '\\1\\2'),
- # delete space after parenthesis close
- #('\)[ \t]*([^\w])', ')\\1'),
- # delete space around operator
- # ('([\w\(\)\]])([ \t]*)(::|\.)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
- ('([\w\(\)\]])([ \t]*)(\.|->)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
- # delete space after operator
- ('(::)([ \t]*)([\w\(\)])', '\\1\\3'),
- # delete superfluous space around operator
- ('([\w\(\)\]])([ \t]+)(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&|\||\*)([ \t]+)([\w\(\)])', '\\1 \\3 \\5'),
- # space around operator1
- ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
- # space around operator2
- ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) ([^\w\s])', '\\1 \\2 \\3'),
- # space around operator3
- ('([^\w\s]) (&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|[^-]>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
- # space around operator4
- ('([\w\(\)\]]) (\*|/|\+|-) *([-:])', '\\1 \\2 \\3'),
- # space around +/-; exponent
- ('([\w\)\]])(\+|-)([_A-Za-z\(])', '\\1 \\2 \\3'),
- ('([_\dA-Za-df-z\)\]])(\+|-)([\w\(])', '\\1 \\2 \\3'),
- # trailing operator
- (' (::|&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&XXX|\||\*XXX)[ \t]*\n([ \t]*)', '\n\\2\\1 '),
- # pointer
- ##('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
- ('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|:|=|\?\)|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
- #to#('(bool|char|const|delete|int|stream|unsigned|void|([A-Z]\w*)|[,])[ \n\t]*(\*|&)[ \t]*', '\\1 \\3'),
- # pointer with template
- ('(( *((bool|char|const|delete|int|stream|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*|\w+::\w+|[,])[ \*&],*)+)>) *(\*|&) *', '\\1 \\5'),
- #to#('(( *((bool|char|delete|int|stream|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)|[,])[ \*&],*)+)>)[ \t\n]*(\*|&) *', '\\1 \\7'),
- # unary pointer, minus, not
- ('(return|=) (\*|&|-|!) ([\w\(])', '\\1 \\2\\3'),
- # space after `operator'
- ('(\Woperator) *([^\w\s])', '\\1 \\2'),
- # dangling brace close
- ('\n[ \t]*(\n[ \t]*})', '\\1'),
- # dangling newline
- ('\n[ \t]*\n[ \t]*\n', '\n\n'),
- # dangling parenthesis open
- #('[ \t]*\n[ \t]*\([ \t]*\n', '('),
- ('\([ \t]*\n', '('),
- # dangling parenthesis close
- ('\n[ \t]*\)', ')'),
- # dangling comma
- ('\n[ \t]*,', ','),
- # dangling semicolon
- ('\n[ \t]*;', ';'),
- # brace open
- ('(\w)[ \t]*([^\s]*){([ \t]*\n)', '\\1\\2\n{\n'),
- # brace open backslash
- ('(\w[^\n]*){[ \t]*\\\\\n', '\\1\\\n{\\\n'),
- # brace close
- ("}[ \t]*([^'\n]*\w[^\n\\\]*)\n", '}\n\\1\n'),
- # brace close backslash
- ("}[ \t]*([^'\n]*\w[^\n\\\]*)", '\n}\n\\1'),
- # delete space after `operator'
- #('(\Woperator) (\W)', '\\1\\2'),
- # delete space after case, label
- ('(\W(case|label) ([\w]+)) :', '\\1:'),
- # delete space before comma
- ('[ \t]*,', ','),
- # delete space before semicolon
- ('[ \t]*;', ';'),
- # delete space before eol-backslash
- ('[ \t]*\\\\\n', '\\\n'),
- # delete trailing whitespace
- ('[ \t]*\n', '\n'),
-
- ## Deuglify code that also gets ugly by rules above.
- # delete newline after typedef struct
- ('(typedef struct\s+([\w]*\s){([^}]|{[^}]*})*})\s*\n\s*(\w[\w\d]*;)', '\\1 \\4'),
- # delete spaces around template brackets
- #('(dynamic_cast|template|([A-Z]\w*))[ \t]*<[ \t]*(( *(bool|char|int|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)),?)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\3\\8>'),
- ('(dynamic_cast|template|typedef|\w+::\w+|[A-Z]\w*)[ \t]*<[ \t]*(( *(bool|char|const|int|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*)( *[\*&]?,|[\*&])*)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\2\\6>'),
- ('(\w+::\w+|[A-Z]\w*) < ((\w+::\w+|[A-Z]\w*)<[A-Z]\w*>) >', '\\1<\\2 >'),
- ('((if|while)\s+\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\n;'),
- ('(for\s+\(([^;]*;[^;]*;([^\)]|\([^\)]*\))*)\))\s*;', '\\1\n;'),
- # do {..} while
- ('(}\s*while\s*)(\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\\2;'),
-
- ## Fix code that gets broken by rules above.
- ##('->\s+\*', '->*'),
- # delete space before #define x()
- ('#[ \t]*define (\w*)[ \t]*\(', '#define \\1('),
- # add space in #define x ()
- ('#[ \t]*define (\w*)(\(([^\(\)]|\([^\(\)]*\))*\)\\n)',
- '#define \\1 \\2'),
- # delete space in #include <>
- ('#[ \t]*include[ \t]*<[ \t]*([^ \t>]*)[ \t]*(/?)[ \t]*([^ \t>]*)[ \t]*>',
- '#include <\\1\\2\\3>'),
- # delete backslash before empty line (emacs' indent region is broken)
- ('\\\\\n\n', '\n\n'),
- ],
-
- COMMENT:
- [
- # delete trailing whitespace
- ('[ \t]*\n', '\n'),
- # delete empty first lines
- ('(/\*\n)\n*', '\\1'),
- # delete empty last lines
- ('\n*(\n\*/)', '\\1'),
- ## delete newline after start?
- #('/(\*)\n', '\\1'),
- ## delete newline before end?
- #('\n(\*/)', '\\1'),
- ],
- }
-
-# Recognize special sequences in the input.
-#
-# (?P<name>regex) -- Assign result of REGEX to NAME.
-# *? -- Match non-greedily.
-# (?m) -- Multiline regex: Make ^ and $ match at each line.
-# (?s) -- Make the dot match all characters including newline.
-# (?x) -- Ignore whitespace in patterns.
-no_match = 'a\ba'
-snippet_res = {
- CXX: {
- 'multiline_comment':
- r'''(?sx)
- (?P<match>
- (?P<code>
- [ \t]*/\*.*?\*/))''',
-
- 'singleline_comment':
- r'''(?mx)
- ^.*
- (?P<match>
- (?P<code>
- [ \t]*//([ \t][^\n]*|)\n))''',
-
- 'string':
- r'''(?x)
- (?P<match>
- (?P<code>
- "([^\"\n](\")*)*"))''',
-
- 'char':
- r'''(?x)
- (?P<match>
- (?P<code>
- '([^']+|\')))''',
-
- 'include':
- r'''(?x)
- (?P<match>
- (?P<code>
- "#[ \t]*include[ \t]*<[^>]*>''',
- },
- }
-
-class Chunk:
- def replacement_text (self):
- return ''
-
- def filter_text (self):
- return self.replacement_text ()
-
-class Substring (Chunk):
- def __init__ (self, source, start, end):
- self.source = source
- self.start = start
- self.end = end
-
- def replacement_text (self):
- s = self.source[self.start:self.end]
- if verbose_p:
- sys.stderr.write ('CXX Rules')
- for i in rules[CXX]:
- if verbose_p:
- sys.stderr.write ('.')
- #sys.stderr.write ('\n\n***********\n')
- #sys.stderr.write (i[0])
- #sys.stderr.write ('\n***********\n')
- #sys.stderr.write ('\n=========>>\n')
- #sys.stderr.write (s)
- #sys.stderr.write ('\n<<=========\n')
- s = re.sub (i[0], i[1], s)
- if verbose_p:
- sys.stderr.write ('done\n')
- return s
-
-
-class Snippet (Chunk):
- def __init__ (self, type, match, format):
- self.type = type
- self.match = match
- self.hash = 0
- self.options = []
- self.format = format
-
- def replacement_text (self):
- return self.match.group ('match')
-
- def substring (self, s):
- return self.match.group (s)
-
- def __repr__ (self):
- return `self.__class__` + ' type = ' + self.type
-
-class Multiline_comment (Snippet):
-    def __init__ (self, type, match, format):
-        self.type = type
- self.match = match
- self.hash = 0
- self.options = []
- self.format = format
-
- def replacement_text (self):
- s = self.match.group ('match')
- if verbose_p:
- sys.stderr.write ('COMMENT Rules')
- for i in rules[COMMENT]:
- if verbose_p:
- sys.stderr.write ('.')
- s = re.sub (i[0], i[1], s)
- return s
-
-snippet_type_to_class = {
- 'multiline_comment': Multiline_comment,
-# 'string': Multiline_comment,
-# 'include': Include_snippet,
-}
-
-def find_toplevel_snippets (s, types):
- if verbose_p:
- sys.stderr.write ('Dissecting')
-
- res = {}
- for i in types:
- res[i] = re.compile (snippet_res[format][i])
-
- snippets = []
- index = 0
- ## found = dict (map (lambda x: (x, None),
- ## types))
- ## urg python2.1
- found = {}
- map (lambda x, f = found: f.setdefault (x, None),
- types)
-
- # We want to search for multiple regexes, without searching
- # the string multiple times for one regex.
- # Hence, we use earlier results to limit the string portion
- # where we search.
- # Since every part of the string is traversed at most once for
- # every type of snippet, this is linear.
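-    # Illustration: with types A and B, if A first matches at offset
-    # 40, the next search for B is limited to s[index:40]; any B match
-    # that wins must start before the current best match, so each
-    # character is examined at most once per snippet type.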
-
- while 1:
- if verbose_p:
- sys.stderr.write ('.')
- first = None
- endex = 1 << 30
- for type in types:
- if not found[type] or found[type][0] < index:
- found[type] = None
- m = res[type].search (s[index:endex])
- if not m:
- continue
-
- cl = Snippet
- if snippet_type_to_class.has_key (type):
- cl = snippet_type_to_class[type]
- snip = cl (type, m, format)
- start = index + m.start ('match')
- found[type] = (start, snip)
-
- if found[type] \
- and (not first \
- or found[type][0] < found[first][0]):
- first = type
-
- # FIXME.
-
- # Limiting the search space is a cute
- # idea, but this *requires* to search
- # for possible containing blocks
- # first, at least as long as we do not
- # search for the start of blocks, but
- # always/directly for the entire
- # @block ... @end block.
-
- endex = found[first][0]
-
- if not first:
- snippets.append (Substring (s, index, len (s)))
- break
-
- (start, snip) = found[first]
- snippets.append (Substring (s, index, start))
- snippets.append (snip)
- found[first] = None
- index = start + len (snip.match.group ('match'))
-
- return snippets
-
-def nitpick_file (outdir, file):
- s = open (file).read ()
-
- for i in rules[GLOBAL_CXX]:
- s = re.sub (i[0], i[1], s)
-
- # FIXME: Containing blocks must be first, see
- # find_toplevel_snippets.
- # We leave simple strings be part of the code
- snippet_types = (
- 'multiline_comment',
- 'singleline_comment',
- 'string',
-# 'char',
- )
-
- chunks = find_toplevel_snippets (s, snippet_types)
- #code = filter (lambda x: is_derived_class (x.__class__, Substring),
- # chunks)
-
- t = string.join (map (lambda x: x.filter_text (), chunks), '')
- fixt = file
- if s != t:
- if not outdir:
- os.system ('mv %s %s~' % (file, file))
- else:
- fixt = os.path.join (outdir,
- os.path.basename (file))
- h = open (fixt, "w")
- h.write (t)
- h.close ()
- if s != t or indent_p:
- indent_file (fixt)
-
-def indent_file (file):
- emacs = '''emacs\
- --no-window-system\
- --batch\
- --no-site-file\
- --no-init-file\
- %(file)s\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "cc-mode")
- (c++-mode)
- (indent-region (point-min) (point-max))
- (if (buffer-modified-p (current-buffer))
- (save-buffer)))' ''' % vars ()
- emacsclient = '''emacsclient\
- --socket-name=%(socketdir)s/%(socketname)s\
- --no-wait\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "cc-mode")
- (find-file "%(file)s")
- (c++-mode)
- (indent-region (point-min) (point-max))
- (if (buffer-modified-p (current-buffer))
- (save-buffer)))' ''' \
- % { 'file': file,
- 'socketdir' : socketdir,
- 'socketname' : socketname, }
- if verbose_p:
- sys.stderr.write (emacs)
- sys.stderr.write ('\n')
- os.system (emacs)
-
-
-def usage ():
- sys.stdout.write (r'''
-Usage:
-fixcc [OPTION]... FILE...
-
-Options:
- --help
- --indent reindent, even if no changes
- --verbose
- --test
-
-Typical use with LilyPond:
-
- fixcc $(find flower kpath-guile lily -name '*cc' -o -name '*hh' | grep -v /out)
-
-This script is licensed under the GNU GPL
-''')
-
-def do_options ():
- global indent_p, outdir, verbose_p
- (options, files) = getopt.getopt (sys.argv[1:], '',
- ['help', 'indent', 'outdir=',
- 'test', 'verbose'])
- for (o, a) in options:
- if o == '--help':
- usage ()
- sys.exit (0)
- elif o == '--indent':
- indent_p = 1
- elif o == '--outdir':
- outdir = a
- elif o == '--verbose':
- verbose_p = 1
- elif o == '--test':
- test ()
- sys.exit (0)
- else:
- assert unimplemented
- if not files:
- usage ()
- sys.exit (2)
- return files
-
-
-outdir = 0
-format = CXX
-socketdir = '/tmp/fixcc'
-socketname = 'fixcc%d' % os.getpid ()
-
-def setup_client ():
- #--no-window-system\
- #--batch\
- os.unlink (os.path.join (socketdir, socketname))
- os.mkdir (socketdir, 0700)
- emacs='''emacs\
- --no-site-file\
- --no-init-file\
- --eval '(let ((error nil)
- (version-control nil))
- (load-library "server")
- (setq server-socket-dir "%(socketdir)s")
- (setq server-name "%(socketname)s")
- (server-start)
- (while t) (sleep 1000))' ''' \
- % { 'socketdir' : socketdir,
- 'socketname' : socketname, }
-
- if not os.fork ():
- os.system (emacs)
- sys.exit (0)
- while not os.path.exists (os.path.join (socketdir, socketname)):
- time.sleep (1)
-
-def main ():
- #emacsclient should be faster, but this does not work yet
- #setup_client ()
- files = do_options ()
- if outdir and not os.path.isdir (outdir):
- os.makedirs (outdir)
- for i in files:
- sys.stderr.write ('%s...\n' % i)
- nitpick_file (outdir, i)
-
-
-## TODO: make this compilable and check with g++
-TEST = '''
-#include <libio.h>
-#include <map>
-class
-ostream ;
-
-class Foo {
-public: static char* foo ();
-std::map<char*,int>* bar (char, char) { return 0; }
-};
-typedef struct
-{
- Foo **bar;
-} String;
-
-ostream &
-operator << (ostream & os, String d);
-
-typedef struct _t_ligature
-{
- char *succ, *lig;
- struct _t_ligature * next;
-} AFM_Ligature;
-
-typedef std::map < AFM_Ligature const *, int > Bar;
-
- /**
- (c) 1997--2008 Han-Wen Nienhuys <hanwen@cs.uu.nl>
- */
-
-/* ||
-* vv
-* !OK OK
-*/
-/* ||
- vv
- !OK OK
-*/
-char *
-Foo:: foo ()
-{
-int
-i
-;
- char* a= &++ i ;
- a [*++ a] = (char*) foe (*i, &bar) *
- 2;
- int operator double ();
- std::map<char*,int> y =*bar(-*a ,*b);
- Interval_t<T> & operator*= (T r);
- Foo<T>*c;
- int compare (Pqueue_ent < K, T > const& e1, Pqueue_ent < K,T> *e2);
- delete *p;
- if (abs (f)*2 > abs (d) *FUDGE)
- ;
- while (0);
- for (; i<x foo(); foo>bar);
- for (; *p && > y;
- foo > bar)
-;
- do {
- ;;;
- }
- while (foe);
-
- squiggle. extent;
- 1 && * unsmob_moment (lf);
- line_spanner_ = make_spanner ("DynamicLineSpanner", rq ? rq->*self_scm
-(): SCM_EOL);
- case foo: k;
-
- if (0) {a=b;} else {
- c=d;
- }
-
- cookie_io_functions_t Memory_out_stream::functions_ = {
- Memory_out_stream::reader,
- ...
- };
-
- int compare (Array < Pitch> *, Array < Pitch> *);
- original_ = (Grob *) & s;
- Drul_array< Link_array<Grob> > o;
-}
-
- header_.char_info_pos = (6 + header_length) * 4;
- return ly_bool2scm (*ma < * mb);
-
- 1 *::sign(2);
-
- (shift) *-d;
-
- a = 0 ? *x : *y;
-
-a = "foo() 2,2,4";
-{
- if (!span_)
- {
- span_ = make_spanner ("StaffSymbol", SCM_EOL);
- }
-}
-{
- if (!span_)
- {
- span_ = make_spanner (StaffSymbol, SCM_EOL);
- }
-}
-'''
-
-def test ():
- test_file = 'fixcc.cc'
- open (test_file, 'w').write (TEST)
- nitpick_file (outdir, test_file)
- sys.stdout.write (open (test_file).read ())
-
-if __name__ == '__main__':
- main ()
-
+++ /dev/null
-#!/usr/bin/env python
-
-import sys
-import os
-import glob
-import re
-
-USAGE = ''' Usage: makelsr.py LSR_SNIPPETS_DIR
-This script must be run from the top of the source tree;
-it updates the snippets in input/lsr with the snippets from input/new or LSR_SNIPPETS_DIR.
-'''
-
-LY_HEADER_LSR = '''%% Do not edit this file; it is auto-generated from LSR http://lsr.dsi.unimi.it
-%% This file is in the public domain.
-'''
-
-LY_HEADER_NEW = '''%% Do not edit this file; it is auto-generated from input/new
-%% This file is in the public domain.
-'''
-
-DEST = os.path.join ('input', 'lsr')
-NEW_LYS = os.path.join ('input', 'new')
-TEXIDOCS = os.path.join ('input', 'texidocs')
-
-TAGS = []
-# NR 1
-TAGS.extend (['pitches', 'rhythms', 'expressive-marks',
-'repeats', 'simultaneous-notes', 'staff-notation',
-'editorial-annotations', 'text'])
-# NR 2
-TAGS.extend (['vocal-music', 'chords', 'keyboards',
-'percussion', 'fretted-strings', 'unfretted-strings',
-'ancient-notation', 'winds', 'world-music'
-])
-
-# other
-TAGS.extend (['contexts-and-engravers', 'tweaks-and-overrides',
-'paper-and-layout', 'breaks', 'spacing', 'midi', 'titles', 'template'])
-
-def exit_with_usage (n=0):
- sys.stderr.write (USAGE)
- sys.exit (n)
-
-try:
- in_dir = sys.argv[1]
-except:
- exit_with_usage (2)
-
-if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)):
- exit_with_usage (3)
-
-unsafe = []
-unconverted = []
-notags_files = []
-
-# mark the section that will be printed verbatim by lilypond-book
-end_header_re = re.compile ('(\\header {.+?doctitle = ".+?})\n', re.M | re.S)
-
-def mark_verbatim_section (ly_code):
- return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1)
-
-# '% LSR' comments are to be stripped
-lsr_comment_re = re.compile (r'\s*%+\s*LSR.*')
-
-begin_header_re = re.compile (r'\\header\s*{', re.M)
-
-# add tags to ly files from LSR
-def add_tags (ly_code, tags):
- return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n', ly_code, 1)
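-
-# e.g. add_tags (s, 'rhythms, midi') inserts a line
-#   lsrtags = "rhythms, midi"
-# right after the opening '\header {' (illustration)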
-
-def copy_ly (srcdir, name, tags):
- global unsafe
- global unconverted
- dest = os.path.join (DEST, name)
- tags = ', '.join (tags)
- s = open (os.path.join (srcdir, name)).read ()
-
- texidoc_translations_path = os.path.join (TEXIDOCS,
- os.path.splitext (name)[0] + '.texidoc')
- if os.path.exists (texidoc_translations_path):
- texidoc_translations = open (texidoc_translations_path).read ()
- # Since we want to insert the translations verbatim using a
- # regexp, \\ is understood as ONE escaped backslash. So we have
- # to escape those backslashes once more...
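-        # e.g. a literal '\markup' read from the .texidoc file is
-        # doubled to '\\markup' here, so that re.sub collapses it back
-        # to '\markup' in the output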
- texidoc_translations = texidoc_translations.replace ('\\', '\\\\')
- s = begin_header_re.sub ('\\g<0>\n' + texidoc_translations, s, 1)
-
- if in_dir in srcdir:
- s = LY_HEADER_LSR + add_tags (s, tags)
- else:
- s = LY_HEADER_NEW + s
-
- s = mark_verbatim_section (s)
- s = lsr_comment_re.sub ('', s)
- open (dest, 'w').write (s)
-
- e = os.system ("convert-ly -e '%s'" % dest)
- if e:
- unconverted.append (dest)
- if os.path.exists (dest + '~'):
- os.remove (dest + '~')
- # -V seems to make unsafe snippets fail more nicely and sooner
- e = os.system ("lilypond -V -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" % dest)
- if e:
- unsafe.append (dest)
-
-def read_source_with_dirs (src):
- s = {}
- l = {}
- for tag in TAGS:
- srcdir = os.path.join (src, tag)
- l[tag] = set (map (os.path.basename, glob.glob (os.path.join (srcdir, '*.ly'))))
- for f in l[tag]:
- if f in s:
- s[f][1].append (tag)
- else:
- s[f] = (srcdir, [tag])
- return s, l
-
-
-tags_re = re.compile ('lsrtags\\s*=\\s*"(.+?)"')
-
-def read_source (src):
- s = {}
- l = dict ([(tag, set()) for tag in TAGS])
- for f in glob.glob (os.path.join (src, '*.ly')):
- basename = os.path.basename (f)
- m = tags_re.search (open (f, 'r').read ())
- if m:
- file_tags = [tag.strip () for tag in m.group (1).split (',')]
- s[basename] = (src, file_tags)
- [l[tag].add (basename) for tag in file_tags if tag in TAGS]
- else:
- notags_files.append (f)
- return s, l
-
-
-def dump_file_list (file, list):
- f = open (file, 'w')
- f.write ('\n'.join (list) + '\n')
-
-## clean out existing lys and generated files
-map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
- glob.glob (os.path.join (DEST, '*.snippet-list')))
-
-# read LSR source where tags are defined by subdirs
-snippets, tag_lists = read_source_with_dirs (in_dir)
-# read input/new where tags are given directly in the files
-s, l = read_source (NEW_LYS)
-snippets.update (s)
-for t in TAGS:
- tag_lists[t].update (l[t])
-
-for (name, (srcdir, tags)) in snippets.items ():
- copy_ly (srcdir, name, tags)
-
-for (tag, file_set) in tag_lists.items ():
- dump_file_list (os.path.join (DEST, tag + '.snippet-list'), sorted(file_set))
-
-if unconverted:
- sys.stderr.write ('These files could not be converted successfully by convert-ly:\n')
- sys.stderr.write ('\n'.join (unconverted) + '\n\n')
-
-if notags_files:
- sys.stderr.write ('No tags could be found in these files:\n')
- sys.stderr.write ('\n'.join (notags_files) + '\n\n')
-
-dump_file_list ('lsr-unsafe.txt', unsafe)
-sys.stderr.write ('''
-
-Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY!
- git add input/lsr/*.ly
- xargs git-diff HEAD < lsr-unsafe.txt
-
-''')
-
+++ /dev/null
-#!/usr/bin/env python
-
-notes = "CDEFGAB"
-alterations = [-1, 0, 1]
-
-def print_note (octave, note, alteration):
- print " <note>\n <pitch>\n <step>%s</step>" % notes[note]
- if alteration != 0:
- print " <alter>%s</alter>" % alteration
- print " <octave>%s</octave>\n </pitch>\n <duration>1</duration>\n <voice>1</voice>\n <type>quarter</type>\n </note>" % octave
-
-
-print """<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
- "http://www.musicxml.org/dtds/partwise.dtd">
-<score-partwise>
- <movement-title>Various pitches and interval sizes</movement-title>
- <part-list>
- <score-part id="P1">
- <part-name>MusicXML Part</part-name>
- </score-part>
- </part-list>
- <!--=========================================================-->
- <part id="P1">
- <measure number="1">
- <attributes>
- <divisions>1</divisions>
- <key>
- <fifths>0</fifths>
- <mode>major</mode>
- </key>
- <time symbol="common">
- <beats>2</beats>
- <beat-type>4</beat-type>
- </time>
- <clef>
- <sign>G</sign>
- <line>2</line>
- </clef>
- </attributes>
-"""
-
-start_octave = 5
-
-for octave in (start_octave, start_octave+1):
- for note in (0,1,2,3,4,5,6):
- for alteration in alterations:
- if octave == start_octave and note == 0 and alteration == -1:
- continue
- print_note (octave, note, alteration)
-# if octave == start_octave and note == 0 and alteration == 0:
-# continue
- print_note (start_octave-(octave-start_octave)-(1-(7-note)/7), (7-note)%7, -alteration)
-
-print """ </measure>
- </part>
-</score-partwise>
-"""
+++ /dev/null
-#!/usr/bin/env python
-
-notes = "CDEFGAB"
-alterations = [-1, 0, 1]
-
-def print_measure (nr, fifth, mode, atts1 = "", atts = "", final = ""):
- print """ <measure number="%s">
- <attributes>
-%s <key>
- <fifths>%s</fifths>
- <mode>%s</mode>
- </key>
-%s </attributes>
- <note>
- <pitch>
- <step>C</step>
- <octave>4</octave>
- </pitch>
- <duration>2</duration>
- <voice>1</voice>
- <type>half</type>
- </note>
-%s </measure>""" % (nr, atts1, fifth, mode, atts, final)
-
-first_div = """ <divisions>1</divisions>
-"""
-first_atts = """ <time symbol="common">
- <beats>2</beats>
- <beat-type>4</beat-type>
- </time>
- <clef>
- <sign>G</sign>
- <line>2</line>
- </clef>
-"""
-
-final_barline = """ <barline location="right">
- <bar-style>light-heavy</bar-style>
- </barline>
-"""
-
-print """<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
- "http://www.musicxml.org/dtds/partwise.dtd">
-<score-partwise>
- <movement-title>Different Key signatures</movement-title>
- <identification>
- <miscellaneous>
- <miscellaneous-field name="description">Various key signature: from 11
- flats to 11 sharps (each one first one measure in major, then one
- measure in minor)</miscellaneous-field>
- </miscellaneous>
- </identification>
- <part-list>
- <score-part id="P1">
- <part-name>MusicXML Part</part-name>
- </score-part>
- </part-list>
- <!--=========================================================-->
- <part id="P1">"""
-
-max_range = 11
-measure = 0
-for fifth in range(-max_range, max_range+1):
- measure += 1
- if fifth == -max_range:
- print_measure (measure, fifth, "major", first_div, first_atts)
- else:
- print_measure (measure, fifth, "major")
- measure += 1
- if fifth == max_range:
- print_measure (measure, fifth, "minor", "", "", final_barline)
- else:
- print_measure (measure, fifth, "minor")
-
-
-print """ </part>
-</score-partwise>"""
+++ /dev/null
-#!/usr/bin/env python
-
-notes = "CDEFGAB"
-alterations = [-1, 0, 1]
-
-dot_xml = """ <dot/>
-"""
-tie_xml = """ <tie type="%s"/>
-"""
-tie_notation_xml = """ <notations><tied type="%s"/></notations>
-"""
-
-
-def generate_note (duration, end_tie = False):
- if duration < 2:
- (notetype, dur) = ("8th", 1)
- elif duration < 4:
- (notetype, dur) = ("quarter", 2)
- elif duration < 8:
- (notetype, dur) = ("half", 4)
- else:
- (notetype, dur) = ("whole", 8)
- dur_processed = dur
- dot = ""
-    if (duration - dur_processed >= dur/2):
-        dot = dot_xml
-        dur_processed += dur/2
-        # a second (double) dot is only valid after a first dot
-        if (duration - dur_processed >= max (dur/4, 1)):
-            dot += dot_xml
-            dur_processed += dur/4
- tie = ""
- tie_notation = ""
- if end_tie:
- tie += tie_xml % "stop"
- tie_notation += tie_notation_xml % "stop"
- second_note = None
- if duration - dur_processed > 0:
- second_note = generate_note (duration-dur_processed, True)
- tie += tie_xml % "start"
- tie_notation += tie_notation_xml % "start"
- note = """ <note>
- <pitch>
- <step>C</step>
- <octave>5</octave>
- </pitch>
- <duration>%s</duration>
-%s <voice>1</voice>
- <type>%s</type>
-%s%s </note>""" % (dur_processed, tie, notetype, dot, tie_notation)
- if second_note:
- return "%s\n%s" % (note, second_note)
- else:
- return note
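-
-# e.g. generate_note (5) yields a half note tied to an eighth note, and
-# generate_note (7) a double-dotted half (durations are in eighth-note
-# units; illustration)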
-
-def print_measure (nr, beats, type, params = "", attr = "", attr2 = "", barline = ""):
- duration = 8*beats/type
- note = generate_note (duration)
-
- print """ <measure number="%s">
- <attributes>
-%s <time%s>
- <beats>%s</beats>
- <beat-type>%s</beat-type>
- </time>
-%s </attributes>
-%s
-%s </measure>""" % (nr, attr, params, beats, type, attr2, note, barline)
-
-first_key = """ <divisions>2</divisions>
- <key>
- <fifths>0</fifths>
- <mode>major</mode>
- </key>
-"""
-first_clef = """ <clef>
- <sign>G</sign>
- <line>2</line>
- </clef>
-"""
-
-final_barline = """ <barline location="right">
- <bar-style>light-heavy</bar-style>
- </barline>
-"""
-
-print """<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
- "http://www.musicxml.org/dtds/partwise.dtd">
-<score-partwise>
- <identification>
- <miscellaneous>
- <miscellaneous-field name="description">Various time signatures: 2/2
- (alla breve), 4/4 (C), 2/2, 3/2, 2/4, 3/4, 4/4, 5/4, 3/8, 6/8,
- 12/8</miscellaneous-field>
- </miscellaneous>
- </identification>
- <part-list>
- <score-part id="P1">
- <part-name>MusicXML Part</part-name>
- </score-part>
- </part-list>
- <!--=========================================================-->
- <part id="P1">"""
-
-measure = 1
-
-print_measure (measure, 2, 2, " symbol=\"common\"", first_key, first_clef)
-measure += 1
-
-print_measure (measure, 4, 4, " symbol=\"common\"")
-measure += 1
-
-print_measure (measure, 2, 2)
-measure += 1
-
-print_measure (measure, 3, 2)
-measure += 1
-
-print_measure (measure, 2, 4)
-measure += 1
-
-print_measure (measure, 3, 4)
-measure += 1
-
-print_measure (measure, 4, 4)
-measure += 1
-
-print_measure (measure, 5, 4)
-measure += 1
-
-print_measure (measure, 3, 8)
-measure += 1
-
-print_measure (measure, 6, 8)
-measure += 1
-
-print_measure (measure, 12, 8, "", "", "", final_barline)
-measure += 1
-
-print """ </part>
-</score-partwise>"""
+++ /dev/null
-Open($1);
-MergeKern($2);
-
-
-# The AFM files of `New Century Schoolbook' family as distributed within the
-# urw-fonts-1.0.7pre41.tar.bz2 archive contain a bunch of ligatures which
-# shouldn't be active by default:
-#
-# T + M -> trademark
-# N + o -> afii61352
-# i + j -> ij
-# I + J -> IJ
-#
-# This font bundle is shipped by Fedora Core 6 and other GNU/Linux
-# distributions; we simply remove those ligatures.
-
-SelectIf("trademark", "trademark", \
- "afii61352", "afii61352", \
- "ij", "ij", \
- "IJ", "IJ");
-if (Strtol($version) < 20070501)
- RemoveATT("Ligature", "*", "*");
-else
- RemovePosSub("*");
-endif
-
-Generate($3 + $fontname + ".otf");
-
-# EOF
+++ /dev/null
-#!/usr/bin/env python
-import os
-import sys
-
-for i in sys.argv[1:]:
- print os.path.realpath (i)
+++ /dev/null
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Temporary script that helps convert translated documentation sources
-# for texi2html processing
-
-# USAGE: tely-gettext.py PYTHON-DIR LOCALEDIR LANG FILES
-
-print "tely-gettext.py"
-
-import sys
-import re
-import os
-import gettext
-
-if len (sys.argv) > 3:
- buildscript_dir, localedir, lang = sys.argv[1:4]
-else:
- print """USAGE: tely-gettext.py PYTHON-DIR LOCALEDIR LANG FILES
- For example scripts/aux/tely-gettext.py python/out Documentation/po/out-www de Documentation/de/user/*.tely"""
- sys.exit (1)
-
-sys.path.append (buildscript_dir)
-import langdefs
-
-double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
-t = gettext.translation('lilypond-doc', localedir, [lang])
-_doc = t.gettext
-
-include_re = re.compile (r'@include (.*?)$', re.M)
-whitespaces = re.compile (r'\s+')
-ref_re = re.compile (r'(?ms)@(ruser|rprogram|ref|rlearning)\{(.*?)\}')
-node_section_re = re.compile (r'@node (.*?)\n@((?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) (.*?)\n')
-menu_entry_re = re.compile (r'\* (.*?)::')
-
-def ref_gettext (m):
- r = whitespaces.sub (' ', m.group (2))
- return '@' + m.group (1) + '{' + _doc (r) + '}'
-
-def node_gettext (m):
- return '@node ' + _doc (m.group (1)) + '\n@' + \
- m.group (2) + ' ' + _doc (m.group (3)) + \
- '\n@translationof ' + m.group (1) + '\n'
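-
-# e.g. node_gettext turns '@node Pitches\n@section Pitches\n' into
-# '@node <translated>\n@section <translated>\n@translationof Pitches\n'
-# (illustration)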
-
-def menu_entry_gettext (m):
- return '* ' + _doc (m.group (1)) + '::'
-
-def process_file (filename):
- print "Processing %s" % filename
- f = open (filename, 'r')
- page = f.read ()
- f.close()
- page = node_section_re.sub (node_gettext, page)
- page = ref_re.sub (ref_gettext, page)
- page = menu_entry_re.sub (menu_entry_gettext, page)
- page = page.replace ("""-- SKELETON FILE --
-When you actually translate this file, please remove these lines as
-well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", """@c -- SKELETON FILE --""")
- page = page.replace ('UNTRANSLATED NODE: IGNORE ME', "@c UNTRANSLATED NODE: IGNORE ME")
- includes = [whitespaces.sub ('', f) for f in include_re.findall (page)]
- f = open (filename, 'w')
- f.write (page)
- f.close ()
- dir = os.path.dirname (filename)
- for file in includes:
- p = os.path.join (dir, file)
- if os.path.exists (p):
- process_file (p)
-
-for filename in sys.argv[4:]:
- process_file (filename)
+++ /dev/null
-#!/usr/bin/env python
-# texi-langutils.py
-
-# WARNING: this script can't find files included in a different directory
-
-import sys
-import re
-import getopt
-import os
-
-import langdefs
-
-def read_pipe (command):
- print command
- pipe = os.popen (command)
- output = pipe.read ()
- if pipe.close ():
- print "pipe failed: %(command)s" % locals ()
- return output
-
-
-optlist, texi_files = getopt.getopt(sys.argv[1:],'no:d:b:i:l:',['skeleton', 'gettext'])
-process_includes = not ('-n', '') in optlist # -n don't process @include's in texinfo files
-
-make_gettext = ('--gettext', '') in optlist # --gettext generate a node list from a Texinfo source
-make_skeleton = ('--skeleton', '') in optlist # --skeleton extract the node tree from a Texinfo source
-
-output_file = 'doc.pot'
-
-# @untranslated should be defined as a macro in Texinfo source
-node_blurb = '''@untranslated
-'''
-doclang = ''
-head_committish = read_pipe ('git-rev-parse HEAD')
-intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*-
-@c This file is part of %(topfile)s
-@ignore
- Translation of GIT committish: %(head_committish)s
- When revising a translation, copy the HEAD committish of the
- version that you are working on. See TRANSLATION for details.
-@end ignore
-'''
-
-end_blurb = """
-@c -- SKELETON FILE --
-"""
-
-for x in optlist:
- if x[0] == '-o': # -o NAME set PO output file name to NAME
- output_file = x[1]
- elif x[0] == '-d': # -d DIR set working directory to DIR
- os.chdir (x[1])
- elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB
- node_blurb = x[1]
- elif x[0] == '-i': # -i BLURB set blurb written at beginning of each file to BLURB
- intro_blurb = x[1]
- elif x[0] == '-l': # -l ISOLANG set documentlanguage to ISOLANG
- doclang = '; documentlanguage: ' + x[1]
-
-texinfo_with_menus_re = re.compile (r"^(\*) +([^:\n]+)::.*?$|^@(include|menu|end menu|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.*?)$|@(rglos){(.+?)}", re.M)
-
-texinfo_re = re.compile (r"^@(include|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.+?)$|@(rglos){(.+?)}", re.M)
-
-ly_string_re = re.compile (r'^([a-zA-Z]+)[\t ]*=|%+[\t ]*(.*)$|\\(?:new|context)\s+(?:[a-zA-Z]*?(?:Staff(?:Group)?|Voice|FiguredBass|FretBoards|Names|Devnull))\s+=\s+"?([a-zA-Z]+)"?\s+')
-lsr_verbatim_ly_re = re.compile (r'% begin verbatim$')
-texinfo_verbatim_ly_re = re.compile (r'^@lilypond\[.*?verbatim')
-
-def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile, output_file=None, scan_ly=False):
- try:
- f = open (texifilename, 'r')
- texifile = f.read ()
- f.close ()
- printedfilename = texifilename.replace ('../','')
- includes = []
-
- # process ly var names and comments
- if output_file and (scan_ly or texifilename.endswith ('.ly')):
- lines = texifile.splitlines ()
- i = 0
- in_verb_ly_block = False
- if texifilename.endswith ('.ly'):
- verbatim_ly_re = lsr_verbatim_ly_re
- else:
- verbatim_ly_re = texinfo_verbatim_ly_re
- for i in range (len (lines)):
- if verbatim_ly_re.search (lines[i]):
- in_verb_ly_block = True
- elif lines[i].startswith ('@end lilypond'):
- in_verb_ly_block = False
- elif in_verb_ly_block:
- for (var, comment, context_id) in ly_string_re.findall (lines[i]):
- if var:
- output_file.write ('# ' + printedfilename + ':' + \
- str (i + 1) + ' (variable)\n_(r"' + var + '")\n')
- elif comment:
- output_file.write ('# ' + printedfilename + ':' + \
- str (i + 1) + ' (comment)\n_(r"' + \
- comment.replace ('"', '\\"') + '")\n')
- elif context_id:
- output_file.write ('# ' + printedfilename + ':' + \
- str (i + 1) + ' (context id)\n_(r"' + \
- context_id + '")\n')
-
- # process Texinfo node names and section titles
- if write_skeleton:
- g = open (os.path.basename (texifilename), 'w')
- subst = globals ()
- subst.update (locals ())
- g.write (i_blurb % subst)
- tutu = texinfo_with_menus_re.findall (texifile)
- node_trigger = False
- for item in tutu:
- if item[0] == '*':
- g.write ('* ' + item[1] + '::\n')
- elif output_file and item[4] == 'rglos':
- output_file.write ('_(r"' + item[5] + '") # @rglos in ' + printedfilename + '\n')
- elif item[2] == 'menu':
- g.write ('@menu\n')
- elif item[2] == 'end menu':
- g.write ('@end menu\n\n')
- else:
- g.write ('@' + item[2] + ' ' + item[3] + '\n')
- if node_trigger:
- g.write (n_blurb)
- node_trigger = False
- elif item[2] == 'include':
- includes.append (item[3])
- else:
- if output_file:
- output_file.write ('# @' + item[2] + ' in ' + \
- printedfilename + '\n_(r"' + item[3].strip () + '")\n')
- if item[2] == 'node':
- node_trigger = True
- g.write (end_blurb)
- g.close ()
-
- elif output_file:
- toto = texinfo_re.findall (texifile)
- for item in toto:
- if item[0] == 'include':
- includes.append(item[1])
- elif item[2] == 'rglos':
- output_file.write ('# @rglos in ' + printedfilename + '\n_(r"' + item[3] + '")\n')
- else:
- output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip () + '")\n')
-
- if process_includes:
- dir = os.path.dirname (texifilename)
- for item in includes:
- process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb, write_skeleton, topfile, output_file, scan_ly)
- except IOError, (errno, strerror):
- sys.stderr.write ("I/O error(%s): %s: %s\n" % (errno, texifilename, strerror))
-
-
-if intro_blurb != '':
- intro_blurb += '\n\n'
-if node_blurb != '':
- node_blurb = '\n' + node_blurb + '\n\n'
-if make_gettext:
- node_list_filename = 'node_list'
- node_list = open (node_list_filename, 'w')
- node_list.write ('# -*- coding: utf-8 -*-\n')
- for texi_file in texi_files:
- # Ugly: scan ly comments and variable names only in the English docs
- is_english_doc = 'Documentation/user' in texi_file
- process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
- os.path.basename (texi_file), node_list,
- scan_ly=is_english_doc)
- for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'):
- node_list.write ('_(r"' + word + '")\n')
- node_list.close ()
- os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename)
-else:
- for texi_file in texi_files:
- process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
- os.path.basename (texi_file))
+++ /dev/null
-#!/usr/bin/env python
-# texi-skeleton-update.py
-
-import sys
-import glob
-import os
-import shutil
-
-sys.stderr.write ('texi-skeleton-update.py\n')
-
-orig_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[1] + '/*.ite??')])
-new_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[2] + '/*.ite??')])
-
-for f in new_skeletons:
- if f in orig_skeletons:
- g = open (os.path.join (sys.argv[1], f), 'r').read ()
- if '-- SKELETON FILE --' in g:
- sys.stderr.write ("Updating %s...\n" % f)
- shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
- elif f != 'fdl.itexi':
- sys.stderr.write ("Copying new file %s...\n" % f)
- shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
-
-for f in orig_skeletons.difference (new_skeletons):
- sys.stderr.write ("Warning: outdated skeleton file %s\n" % f)
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-USAGE: translations-status.py BUILDSCRIPT-DIR LOCALEDIR
-
- This script must be run from Documentation/
-
- Reads the template file translations.template.html.in and,
-for each LANG in LANGUAGES, LANG/translations.template.html.in.
- Writes translations.html.in and, for each LANG in LANGUAGES,
-LANG/translations.html.in.
- Writes out/translations-status.txt.
- Updates word counts in TRANSLATION.
-"""
-
-import sys
-import re
-import string
-import os
-
-import langdefs
-import buildlib
-
-def progress (str):
- sys.stderr.write (str + '\n')
-
-progress ("translations-status.py")
-
-_doc = lambda s: s
-
-# load gettext messages catalogs
-translation = langdefs.translation
-
-
-language_re = re.compile (r'^@documentlanguage (.+)', re.M)
-comments_re = re.compile (r'^@ignore\n(.|\n)*?\n@end ignore$|@c .*?$', re.M)
-space_re = re.compile (r'\s+', re.M)
-lilypond_re = re.compile (r'@lilypond({.*?}|(.|\n)*?\n@end lilypond$)', re.M)
-node_re = re.compile ('^@node .*?$', re.M)
-title_re = re.compile ('^@(top|chapter|(?:sub){0,2}section|' + \
-'(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?) (.*?)$', re.M)
-include_re = re.compile ('^@include (.*?)$', re.M)
-
-translators_re = re.compile (r'^@c\s+Translators\s*:\s*(.*?)$', re.M | re.I)
-checkers_re = re.compile (r'^@c\s+Translation\s*checkers\s*:\s*(.*?)$',
- re.M | re.I)
-status_re = re.compile (r'^@c\s+Translation\s*status\s*:\s*(.*?)$', re.M | re.I)
-post_gdp_re = re.compile ('post.GDP', re.I)
-untranslated_node_str = '@untranslated'
-skeleton_str = '-- SKELETON FILE --'
-
-section_titles_string = _doc ('Section titles')
-last_updated_string = _doc (' <p><i>Last updated %s</i></p>\n')
-detailed_status_heads = [_doc ('Translators'), _doc ('Translation checkers'),
- _doc ('Translated'), _doc ('Up to date'),
- _doc ('Other info')]
-format_table = {
- 'not translated': {'color':'d0f0f8', 'short':_doc ('no'), 'abbr':'NT',
- 'long':_doc ('not translated')},
- 'partially translated': {'color':'dfef77',
- 'short':_doc ('partially (%(p)d %%)'),
- 'abbr':'%(p)d%%',
- 'long':_doc ('partially translated (%(p)d %%)')},
- 'fully translated': {'color':'1fff1f', 'short':_doc ('yes'), 'abbr':'FT',
- 'long': _doc ('translated')},
- 'up to date': {'short':_doc ('yes'), 'long':_doc ('up to date'),
- 'abbr':'100%%', 'vague':_doc ('up to date')},
- 'outdated': {'short':_doc ('partially'), 'abbr':'%(p)d%%',
- 'vague':_doc ('partially up to date')},
- 'N/A': {'short':_doc ('N/A'), 'abbr':'N/A', 'color':'d587ff', 'vague':''},
- 'pre-GDP':_doc ('pre-GDP'),
- 'post-GDP':_doc ('post-GDP')
-}
-
-texi_level = {
-# (Unnumbered/Numbered/Lettered, level)
- 'top': ('u', 0),
- 'unnumbered': ('u', 1),
- 'unnumberedsec': ('u', 2),
- 'unnumberedsubsec': ('u', 3),
- 'chapter': ('n', 1),
- 'section': ('n', 2),
- 'subsection': ('n', 3),
- 'appendix': ('l', 1)
-}
-
-appendix_number_trans = string.maketrans ('@ABCDEFGHIJKLMNOPQRSTUVWXY',
- 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
-
-class SectionNumber (object):
- def __init__ (self):
- self.__data = [[0,'u']]
-
- def __increase_last_index (self):
- type = self.__data[-1][1]
- if type == 'l':
- self.__data[-1][0] = \
- self.__data[-1][0].translate (appendix_number_trans)
- elif type == 'n':
- self.__data[-1][0] += 1
-
- def format (self):
- if self.__data[-1][1] == 'u':
- return ''
- return '.'.join ([str (i[0]) for i in self.__data if i[1] != 'u']) + ' '
-
- def increase (self, (type, level)):
- if level == 0:
- self.__data = [[0,'u']]
- while level + 1 < len (self.__data):
- del self.__data[-1]
- if level + 1 > len (self.__data):
- self.__data.append ([0, type])
- if type == 'l':
- self.__data[-1][0] = '@'
- if type == self.__data[-1][1]:
- self.__increase_last_index ()
- else:
- self.__data[-1] = ([0, type])
- if type == 'l':
- self.__data[-1][0] = 'A'
- elif type == 'n':
- self.__data[-1][0] = 1
- return self.format ()
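-
-# e.g. successive calls increase (('n', 1)), increase (('n', 2)),
-# increase (('n', 2)) return '1 ', '1.1 ', '1.2 ' (illustration)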
-
-
-def percentage_color (percent):
- p = percent / 100.0
- if p < 0.33:
- c = [hex (int (3 * p * b + (1 - 3 * p) * a))[2:]
- for (a, b) in [(0xff, 0xff), (0x5c, 0xa6), (0x5c, 0x4c)]]
- elif p < 0.67:
- c = [hex (int ((3 * p - 1) * b + (2 - 3 * p) * a))[2:]
- for (a, b) in [(0xff, 0xff), (0xa6, 0xff), (0x4c, 0x3d)]]
- else:
- c = [hex (int ((3 * p - 2) * b + 3 * (1 - p) * a))[2:]
- for (a, b) in [(0xff, 0x1f), (0xff, 0xff), (0x3d, 0x1f)]]
- return ''.join (c)
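-
-# e.g. percentage_color (0) yields 'ff5c5c' (reddish) and
-# percentage_color (100) yields '1fff1f' (green), per the
-# interpolation above (illustration)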
-
-
-def update_word_count (text, filename, word_count):
- return re.sub (r'(?m)^(\d+) *' + filename,
- str (word_count).ljust (6) + filename,
- text)
-
-po_msgid_re = re.compile (r'^msgid "(.*?)"(?:\n"(.*?)")*', re.M)
-
-def po_word_count (po_content):
- s = ' '.join ([''.join (t) for t in po_msgid_re.findall (po_content)])
- return len (space_re.split (s))
-
-sgml_tag_re = re.compile (r'<.*?>', re.S)
-
-def sgml_word_count (sgml_doc):
- s = sgml_tag_re.sub ('', sgml_doc)
- return len (space_re.split (s))
-
-def tely_word_count (tely_doc):
- '''
- Calculate word count of a Texinfo document node by node.
-
- Take string tely_doc as an argument.
- Return a list of integers.
-
- Texinfo comments and @lilypond blocks are not included in word counts.
- '''
- tely_doc = comments_re.sub ('', tely_doc)
- tely_doc = lilypond_re.sub ('', tely_doc)
- nodes = node_re.split (tely_doc)
- return [len (space_re.split (n)) for n in nodes]
-
-
-class TelyDocument (object):
- def __init__ (self, filename):
- self.filename = filename
- self.contents = open (filename).read ()
-
- ## record title and sectioning level of first Texinfo section
- m = title_re.search (self.contents)
- if m:
- self.title = m.group (2)
- self.level = texi_level [m.group (1)]
- else:
- self.title = 'Untitled'
- self.level = ('u', 1)
-
- m = language_re.search (self.contents)
- if m:
- self.language = m.group (1)
-
- included_files = [os.path.join (os.path.dirname (filename), t)
- for t in include_re.findall (self.contents)]
- self.included_files = [p for p in included_files if os.path.exists (p)]
-
- def print_title (self, section_number):
- return section_number.increase (self.level) + self.title
-
-
-class TranslatedTelyDocument (TelyDocument):
- def __init__ (self, filename, masterdocument, parent_translation=None):
- TelyDocument.__init__ (self, filename)
-
- self.masterdocument = masterdocument
- if not hasattr (self, 'language') \
- and hasattr (parent_translation, 'language'):
- self.language = parent_translation.language
- if hasattr (self, 'language'):
- self.translation = translation[self.language]
- else:
- self.translation = lambda x: x
- self.title = self.translation (self.title)
-
- ## record authoring information
- m = translators_re.search (self.contents)
- if m:
- self.translators = [n.strip () for n in m.group (1).split (',')]
- else:
- self.translators = parent_translation.translators
- m = checkers_re.search (self.contents)
- if m:
- self.checkers = [n.strip () for n in m.group (1).split (',')]
- elif isinstance (parent_translation, TranslatedTelyDocument):
- self.checkers = parent_translation.checkers
- else:
- self.checkers = []
-
- ## check whether translation is pre- or post-GDP
- m = status_re.search (self.contents)
- if m:
- self.post_gdp = bool (post_gdp_re.search (m.group (1)))
- else:
- self.post_gdp = False
-
- ## record which parts (nodes) of the file are actually translated
- self.partially_translated = not skeleton_str in self.contents
- nodes = node_re.split (self.contents)
- self.translated_nodes = [not untranslated_node_str in n for n in nodes]
-
- ## calculate translation percentage
- master_total_word_count = sum (masterdocument.word_count)
- translation_word_count = \
- sum ([masterdocument.word_count[k] * self.translated_nodes[k]
- for k in range (min (len (masterdocument.word_count),
- len (self.translated_nodes)))])
- self.translation_percentage = \
- 100 * translation_word_count / master_total_word_count
-
- ## calculate how much the file is outdated
- (diff_string, error) = \
- buildlib.check_translated_doc (masterdocument.filename, self.filename, self.contents)
- if error:
- sys.stderr.write ('warning: %s: %s' % (self.filename, error))
- self.uptodate_percentage = None
- else:
- diff = diff_string.splitlines ()
- insertions = sum ([len (l) - 1 for l in diff
- if l.startswith ('+')
- and not l.startswith ('+++')])
- deletions = sum ([len (l) - 1 for l in diff
- if l.startswith ('-')
- and not l.startswith ('---')])
- outdateness_percentage = 50.0 * (deletions + insertions) / \
- (masterdocument.size + 0.5 * (deletions - insertions))
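-            # worked example (illustrative numbers): a 1000-character
-            # master with 100 characters deleted and 100 inserted in the
-            # diff gives 50.0 * 200 / 1000 = 10 % outdateness, i.e. an
-            # up-to-dateness of 90 %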
- self.uptodate_percentage = 100 - int (outdateness_percentage)
- if self.uptodate_percentage > 100:
- alternative = 50
- progress ("%s: strange uptodateness percentage %d %%, \
-setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
- self.uptodate_percentage = alternative
- elif self.uptodate_percentage < 1:
- alternative = 1
- progress ("%s: strange uptodateness percentage %d %%, \
-setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
- self.uptodate_percentage = alternative
-
- def completeness (self, formats=['long'], translated=False):
- if translated:
- translation = self.translation
- else:
- translation = lambda x: x
-
- if isinstance (formats, str):
- formats = [formats]
- p = self.translation_percentage
- if p == 0:
- status = 'not translated'
- elif p == 100:
- status = 'fully translated'
- else:
- status = 'partially translated'
- return dict ([(f, translation (format_table[status][f]) % locals())
- for f in formats])
-
- def uptodateness (self, formats=['long'], translated=False):
- if translated:
- translation = self.translation
- else:
- translation = lambda x: x
-
- if isinstance (formats, str):
- formats = [formats]
- p = self.uptodate_percentage
- if p == None:
- status = 'N/A'
- elif p == 100:
- status = 'up to date'
- else:
- status = 'outdated'
- l = {}
- for f in formats:
- if f == 'color' and p != None:
- l['color'] = percentage_color (p)
- else:
- l[f] = translation (format_table[status][f]) % locals ()
- return l
-
- def gdp_status (self):
- if self.post_gdp:
- return self.translation (format_table['post-GDP'])
- else:
- return self.translation (format_table['pre-GDP'])
-
- def short_html_status (self):
- s = ' <td>'
- if self.partially_translated:
- s += '<br>\n '.join (self.translators) + '<br>\n'
- if self.checkers:
- s += ' <small>' + \
- '<br>\n '.join (self.checkers) + '</small><br>\n'
-
- c = self.completeness (['color', 'long'])
- s += ' <span style="background-color: #%(color)s">\
-%(long)s</span><br>\n' % c
-
- if self.partially_translated:
- u = self.uptodateness (['vague', 'color'])
- s += ' <span style="background-color: #%(color)s">\
-%(vague)s</span><br>\n' % u
-
- s += ' </td>\n'
- return s
-
- def text_status (self):
- s = self.completeness ('abbr')['abbr'] + ' '
-
- if self.partially_translated:
- s += self.uptodateness ('abbr')['abbr'] + ' '
- return s
-
- def html_status (self, numbering=SectionNumber ()):
- if self.title == 'Untitled':
- return ''
-
- if self.level[1] == 0: # if self is a master document
- s = '''<table align="center" border="2">
- <tr align="center">
- <th>%s</th>''' % self.print_title (numbering)
- s += ''.join ([' <th>%s</th>\n' % self.translation (h)
- for h in detailed_status_heads])
- s += ' </tr>\n'
- s += ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
- % (self.translation (section_titles_string),
- sum (self.masterdocument.word_count))
-
- else:
- s = ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
- % (self.print_title (numbering),
- sum (self.masterdocument.word_count))
-
- if self.partially_translated:
- s += ' <td>' + '<br>\n '.join (self.translators) + '</td>\n'
- s += ' <td>' + '<br>\n '.join (self.checkers) + '</td>\n'
- else:
- s += ' <td></td>\n' * 2
-
- c = self.completeness (['color', 'short'], translated=True)
- s += ' <td><span style="background-color: #%(color)s">\
-%(short)s</span></td>\n' % {'color': c['color'],
- 'short': c['short']}
-
- if self.partially_translated:
- u = self.uptodateness (['short', 'color'], translated=True)
- s += ' <td><span style="background-color: #%(color)s">\
-%(short)s</span></td>\n' % {'color': u['color'],
- 'short': u['short']}
- else:
- s += ' <td></td>\n'
-
- s += ' <td>' + self.gdp_status () + '</td>\n </tr>\n'
- s += ''.join ([i.translations[self.language].html_status (numbering)
- for i in self.masterdocument.includes
- if self.language in i.translations])
-
- if self.level[1] == 0: # if self is a master document
- s += '</table>\n<p></p>\n'
- return s
-
-class MasterTelyDocument (TelyDocument):
- def __init__ (self,
- filename,
- parent_translations=dict ([(lang, None)
- for lang in langdefs.LANGDICT])):
- TelyDocument.__init__ (self, filename)
- self.size = len (self.contents)
- self.word_count = tely_word_count (self.contents)
- translations = dict ([(lang, os.path.join (lang, filename))
- for lang in langdefs.LANGDICT])
- self.translations = \
- dict ([(lang,
- TranslatedTelyDocument (translations[lang],
- self, parent_translations.get (lang)))
- for lang in langdefs.LANGDICT
- if os.path.exists (translations[lang])])
- if self.translations:
- self.includes = [MasterTelyDocument (f, self.translations)
- for f in self.included_files]
- else:
- self.includes = []
-
- def update_word_counts (self, s):
- s = update_word_count (s, self.filename, sum (self.word_count))
- for i in self.includes:
- s = i.update_word_counts (s)
- return s
-
- def html_status (self, numbering=SectionNumber ()):
- if self.title == 'Untitled' or not self.translations:
- return ''
- if self.level[1] == 0: # if self is a master document
- s = '''<table align="center" border="2">
- <tr align="center">
- <th>%s</th>''' % self.print_title (numbering)
- s += ''.join ([' <th>%s</th>\n' % l for l in self.translations])
- s += ' </tr>\n'
- s += ' <tr align="left">\n <td>Section titles<br>(%d)</td>\n' \
- % sum (self.word_count)
-
- else: # if self is an included file
- s = ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
- % (self.print_title (numbering), sum (self.word_count))
-
- s += ''.join ([t.short_html_status ()
- for t in self.translations.values ()])
- s += ' </tr>\n'
- s += ''.join ([i.html_status (numbering) for i in self.includes])
-
- if self.level[1] == 0: # if self is a master document
- s += '</table>\n<p></p>\n'
- return s
-
- def text_status (self, numbering=SectionNumber (), colspec=[48,12]):
- if self.title == 'Untitled' or not self.translations:
- return ''
-
- s = ''
- if self.level[1] == 0: # if self is a master document
- s += (self.print_title (numbering) + ' ').ljust (colspec[0])
-            s += ''.join ([('%s' % l).ljust (colspec[1])
-                           for l in self.translations])
- s += '\n'
- s += ('Section titles (%d)' % \
- sum (self.word_count)).ljust (colspec[0])
-
- else:
- s = '%s (%d) ' \
- % (self.print_title (numbering), sum (self.word_count))
- s = s.ljust (colspec[0])
-
- s += ''.join ([t.text_status ().ljust(colspec[1])
- for t in self.translations.values ()])
- s += '\n\n'
- s += ''.join ([i.text_status (numbering) for i in self.includes])
-
- if self.level[1] == 0:
- s += '\n'
- return s
-
-
-update_category_word_counts_re = re.compile (r'(?ms)^-(\d+)-(.*?\n)\d+ *total')
-
-counts_re = re.compile (r'(?m)^(\d+) ')
-
-def update_category_word_counts_sub (m):
- return '-' + m.group (1) + '-' + m.group (2) + \
- str (sum ([int (c)
- for c in counts_re.findall (m.group (2))])).ljust (6) + \
- 'total'
-
-
-progress ("Reading documents...")
-
-tely_files = \
- buildlib.read_pipe ("find -maxdepth 2 -name '*.tely'")[0].splitlines ()
-tely_files.sort ()
-master_docs = [MasterTelyDocument (os.path.normpath (filename))
- for filename in tely_files]
-master_docs = [doc for doc in master_docs if doc.translations]
-
-main_status_page = open ('translations.template.html.in').read ()
-
-enabled_languages = [l for l in langdefs.LANGDICT
- if langdefs.LANGDICT[l].enabled
- and l != 'en']
-lang_status_pages = \
- dict ([(l, open (os.path.join (l, 'translations.template.html.in')). read ())
- for l in enabled_languages])
-
-progress ("Generating status pages...")
-
-date_time = buildlib.read_pipe ('LANG= date -u')[0]
-
-main_status_html = last_updated_string % date_time
-main_status_html += '\n'.join ([doc.html_status () for doc in master_docs])
-
-html_re = re.compile ('<html>', re.I)
-end_body_re = re.compile ('</body>', re.I)
-
-html_header = '''<html>
-<!-- This page is automatically generated by translation-status.py from
-translations.template.html.in; DO NOT EDIT !-->'''
-
-main_status_page = html_re.sub (html_header, main_status_page)
-
-main_status_page = end_body_re.sub (main_status_html + '\n</body>',
- main_status_page)
-
-open ('translations.html.in', 'w').write (main_status_page)
-
-for l in enabled_languages:
- date_time = buildlib.read_pipe ('LANG=%s date -u' % l)[0]
- lang_status_pages[l] = translation[l] (last_updated_string) % date_time + lang_status_pages[l]
- lang_status_page = html_re.sub (html_header, lang_status_pages[l])
- html_status = '\n'.join ([doc.translations[l].html_status ()
- for doc in master_docs
- if l in doc.translations])
- lang_status_page = end_body_re.sub (html_status + '\n</body>',
- lang_status_page)
- open (os.path.join (l, 'translations.html.in'), 'w').write (lang_status_page)
-
-main_status_txt = '''Documentation translations status
-Generated %s
-NT = not translated
-FT = fully translated
-
-''' % date_time
-
-main_status_txt += '\n'.join ([doc.text_status () for doc in master_docs])
-
-status_txt_file = 'out/translations-status.txt'
-progress ("Writing %s..." % status_txt_file)
-open (status_txt_file, 'w').write (main_status_txt)
-
-translation_instructions_file = 'TRANSLATION'
-progress ("Updating %s..." % translation_instructions_file)
-translation_instructions = open (translation_instructions_file).read ()
-
-for doc in master_docs:
- translation_instructions = doc.update_word_counts (translation_instructions)
-
-for html_file in re.findall (r'(?m)^\d+ *(\S+?\.html\S*?)(?: |$)',
- translation_instructions):
- word_count = sgml_word_count (open (html_file).read ())
- translation_instructions = update_word_count (translation_instructions,
- html_file,
- word_count)
-
-for po_file in re.findall (r'(?m)^\d+ *(\S+?\.po\S*?)(?: |$)',
- translation_instructions):
- word_count = po_word_count (open (po_file).read ())
- translation_instructions = update_word_count (translation_instructions,
- po_file,
- word_count)
-
-translation_instructions = \
- update_category_word_counts_re.sub (update_category_word_counts_sub,
- translation_instructions)
-
-open (translation_instructions_file, 'w').write (translation_instructions)
+++ /dev/null
-#!/usr/bin/env python
-# update-snippets.py
-
-# USAGE: update-snippets.py REFERENCE-DIR TARGET-DIR FILES
-#
-# update ly snippets in TARGET-DIR/FILES with snippets from REFERENCE-DIR/FILES
-#
-# More precisely, each existing FILE in TARGET-DIR is matched to the FILE in
-# REFERENCE-DIR (it the latter does not exist, a warning is given).
-#
-# Shell wildcards expansion is performed on FILES.
-# This script currently supports Texinfo format.
-# Ly snippets preceded with a line containing '@c KEEP LY' in TARGET-DIR/FILES
-# will not be updated.
-# An error occurs if REFERENCE-DIR/FILE and TARGET-DIR/FILE do not have the
-# same snippets count.
-
-import sys
-import os
-import glob
-import re
-
-print "update-snippets.py"
-
-comment_re = re.compile (r'(?<!@)(@c(?:omment)? .*?\n|^@ignore\n.*?\n@end ignore\n)', re.M | re.S)
-snippet_re = re.compile (r'^(@lilypond(?:file)?(?:\[.*?\])?\s*\{.+?\}|@lilypond(?:\[.*?\])?(?:.|\n)+?@end lilypond)', re.M)
-
-
-def snippet_split (l):
- r = []
- for s in [s for s in l if s]:
- if s.startswith ('@c ') or s.startswith ('@ignore\n') or s.startswith ('@comment '):
- r.append(s)
- else:
- r += [t for t in snippet_re.split (s) if t]
- return r
-
-def count_snippet (l):
- k = 0
- for s in l:
- if s.startswith ('@lilypond'):
- k += 1
- return k
-
-def find_next_snippet (l, k):
- while not l[k].startswith ('@lilypond'):
- k += 1
- return k
-
-exit_code = 0
-
-def update_exit_code (code):
- global exit_code
- exit_code = max (code, exit_code)
-
-ref_dir, target_dir = sys.argv [1:3]
-file_patterns = sys.argv[3:]
-
-total_snippet_count = 0
-changed_snippets_count = 0
-
-for pattern in file_patterns:
- files = glob.glob (os.path.join (target_dir, pattern))
- for file in files:
- ref_file = os.path.join (ref_dir, os.path.basename (file))
- if not os.path.isfile (ref_file):
- sys.stderr.write ("Warning: %s: no such file.\nReference file for %s not found.\n" % (ref_file, file))
- continue
- f = open (file, 'r')
- target_source = comment_re.split (f.read ())
- f.close ()
- if reduce (lambda x, y: x or y, ['-- SKELETON FILE --' in s for s in target_source]):
- sys.stderr.write ("Skipping skeleton file %s\n" % file)
- continue
- g = open (ref_file, 'r')
- ref_source = comment_re.split (g.read ())
- target_source = snippet_split (target_source)
- ref_source = snippet_split (ref_source)
- if '' in target_source or '' in ref_source:
- raise "AAAAARGH: unuseful empty string"
- snippet_count = count_snippet (target_source)
- if not snippet_count == count_snippet (ref_source):
- update_exit_code (1)
- sys.stderr.write ("Error: %s and %s have different snippet counts.\n\
-Update translation by at least adding a @lilypond block where necessary, then rerun this script.\n" % (ref_file, file))
- continue
- total_snippet_count += snippet_count
- c = 0
- k = -1
- for j in range (len (target_source)):
- if target_source[j].startswith ('@lilypond'):
- k = find_next_snippet (ref_source, k+1)
- if j > 0 and (not target_source[j-1].startswith ('@c KEEP LY')) and target_source[j] != ref_source[k]:
- target_source[j] = ref_source[k]
- c += 1
- changed_snippets_count += 1
- f = open (file, 'w')
- f.write (''.join (target_source))
- sys.stderr.write ('%s: %d/%d snippets updated\n' % (file, c, snippet_count))
-
-sys.stderr.write ('\nTotal: %d snippets, %d updated snippets.\n' % (total_snippet_count, changed_snippets_count))
-sys.exit (exit_code)
--- /dev/null
+depth=../..
+
+EXTRA_DIST_FILES = $(call src-wildcard,*.sh) $(call src-wildcard,*.py)
+EXTRA_DIST_FILES += pfx2ttf.fontforge
+
+include $(depth)/make/stepmake.make
+
+default:
--- /dev/null
+#!/bin/sh
+
+if test "$1" == "--fresh"; then
+ fresh=yes
+fi
+
+if test ! -f config-cov.make; then
+ fresh=yes
+fi
+
+if test "$fresh" = "yes";
+then
+ ./configure --enable-config=cov --disable-optimising \
+ && make conf=cov -j2 clean \
+ && perl -i~ -pe 's/-pipe /-fprofile-arcs -ftest-coverage -pipe /g' config-cov.make \
+ && perl -i~ -pe 's/ -ldl / -lgcov -ldl /g' config-cov.make
+else
+ find -name '*.gcda' -exec rm '{}' ';'
+fi
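+# On a fresh run, the perl edits above add gcc coverage instrumentation
+# (-fprofile-arcs -ftest-coverage) and link in libgcov; on incremental
+# runs, only stale .gcda counters are removed.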
+
+mkdir -p scripts/out-cov/
+touch scripts/out-cov/midi2ly scripts/out-cov/midi2ly.1
+make conf=cov -j2 && \
+ make conf=cov test-clean OUT_TEST=testcov LILYPOND_JOBS= && \
+ make conf=cov test OUT_TEST=testcov LILYPOND_JOBS='-dtrace-scheme-coverage '
+
+if test "$?" != "0"; then
+ tail -100 out-cov/test-run.log
+ exit 1
+fi
+
+depth=../..
+resultdir=out/coverage-results
+
+rm -rf $resultdir
+mkdir $resultdir
+cd $resultdir
+
+ln $depth/lily/* .
+ln $depth/scm/*.scm .
+mv $depth/input/regression/out-testcov/*.scm.cov .
+ln $depth/ly/*.ly .
+ln $depth/lily/out-cov/*[ch] .
+mkdir include
+ln $depth/lily/include/* include/
+ln $depth/flower/include/* include/
+for a in *[cl] *.yy
+do
+ gcov -o $depth/lily/out-cov/ -p $a > $a.gcov-summary
+done
+
+$depth/scripts/auxiliar/coverage.py --uncovered *.cc > uncovered.txt
+$depth/scripts/auxiliar/coverage.py --hotspots *.cc > hotspots.txt
+$depth/scripts/auxiliar/coverage.py --summary *.cc > summary.txt
+$depth/scripts/auxiliar/coverage.py --uncovered *.scm > uncovered-scheme.txt
+
+head -20 summary.txt
+
+cat <<EOF
+results in
+
+ out/coverage-results/summary.txt
+ out/coverage-results/uncovered.txt
+ out/coverage-results/uncovered-scheme.txt
+ out/coverage-results/hotspots.txt
+
+EOF
--- /dev/null
+#!/bin/sh
+
+if test "$1" == "--fresh"; then
+ fresh=yes
+fi
+
+if test ! -f config-prof.make; then
+ fresh=yes
+fi
+
+if test "$fresh" = "yes";
+then
+ ./configure --enable-config=prof --enable-optimising \
+ && perl -i~ -pe 's/-pipe /-pg -pipe /g' config-prof.make \
+ && perl -i~ -pe 's/ -ldl / -pg -ldl /g' config-prof.make
+fi
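+# The perl edits above add gprof instrumentation (-pg) to the compile and
+# link flags recorded in config-prof.make.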
+
+make conf=prof -j2
+
+if test "$?" != "0"; then
+ exit 2
+fi
+
+depth=../..
+resultdir=out/profile-results
+
+rm -rf $resultdir
+mkdir $resultdir
+cd $resultdir
+
+
+cat > long-score.ly << EOF
+\version "2.10.0"
+foo = \new Staff \new Voice \repeat unfold 50 \relative { c4 d8[ d16( e]~ e16[ e e) f] g8 }
+\score {
+ \new ChoirStaff <<
+ \foo \foo \foo \foo
+ \foo \foo \foo \foo
+
+ >>
+ \midi {}
+ \layout {}
+}
+EOF
+
+rm -f gmon.sum
+
+exe=$depth/out-prof/bin/lilypond
+
+## todo: figure out representative sample.
+files="wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 wtk1-fugue2 mozart-hrn-3 mozart-hrn-3 long-score"
+
+
+
+$exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
+ -I $depth/input/mutopia/W.A.Mozart/ \
+ $files
+
+
+for a in *.profile; do
+ echo $a
+ cat $a
+done
+
+echo 'running gprof'
+gprof $exe > profile
+
+exit 0
+
+
+## gprof -s takes forever.
+for a in `seq 1 3`; do
+ for f in $files ; do
+ $exe -ddump-profile --formats=ps -I $depth/input/ -I $depth/input/mutopia/J.S.Bach/ \
+ -I $depth/input/mutopia/W.A.Mozart/ \
+ $f
+
+ echo 'running gprof'
+ if test -f gmon.sum ; then
+ gprof -s $exe gmon.out gmon.sum
+ else
+ mv gmon.out gmon.sum
+ fi
+ done
+done
+
+gprof $exe gmon.sum > profile
--- /dev/null
+#!/usr/bin/env python
+
+"""
+check_texi_refs.py
+Interactive Texinfo cross-references checking and fixing tool
+
+"""
+
+
+import sys
+import re
+import os
+import optparse
+import imp
+
+outdir = 'out-www'
+
+log = sys.stderr
+stdout = sys.stdout
+
+file_not_found = 'file not found in include path'
+
+warn_not_fixed = '*** Warning: this broken x-ref has not been fixed!\n'
+
+opt_parser = optparse.OptionParser (usage='check_texi_refs.py [OPTION]... FILE',
+ description='''Check and fix \
+cross-references in a collection of Texinfo
+documents that heavily cross-reference each other.
+''')
+
+opt_parser.add_option ('-a', '--auto-fix',
+ help="Automatically fix cross-references whenever \
+it is possible",
+ action='store_true',
+ dest='auto_fix',
+ default=False)
+
+opt_parser.add_option ('-b', '--batch',
+ help="Do not run interactively",
+ action='store_false',
+ dest='interactive',
+ default=True)
+
+opt_parser.add_option ('-c', '--check-comments',
+ help="Also check commented out x-refs",
+ action='store_true',
+ dest='check_comments',
+ default=False)
+
+opt_parser.add_option ('-p', '--check-punctuation',
+ help="Check punctuation after x-refs",
+ action='store_true',
+ dest='check_punctuation',
+ default=False)
+
+opt_parser.add_option ("-I", '--include', help="add DIR to include path",
+ metavar="DIR",
+ action='append', dest='include_path',
+ default=[os.path.abspath (os.getcwd ())])
+
+(options, files) = opt_parser.parse_args ()
+
+class InteractionError (Exception):
+ pass
+
+
+manuals_defs = imp.load_source ('manuals_defs', files[0])
+manuals = {}
+
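+# find_file search order: prior_directory, then prior_directory/out-www,
+# then each include path entry, then each entry's out-www subdirectory.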
+def find_file (name, prior_directory='.'):
+ p = os.path.join (prior_directory, name)
+ out_p = os.path.join (prior_directory, outdir, name)
+ if os.path.isfile (p):
+ return p
+ elif os.path.isfile (out_p):
+ return out_p
+
+ # looking for file in include_path
+ for d in options.include_path:
+ p = os.path.join (d, name)
+ if os.path.isfile (p):
+ return p
+
+ # file not found in include_path: looking in `outdir' subdirs
+ for d in options.include_path:
+ p = os.path.join (d, outdir, name)
+ if os.path.isfile (p):
+ return p
+
+ raise EnvironmentError (1, file_not_found, name)
+
+
+exit_code = 0
+
+def set_exit_code (n):
+ global exit_code
+ exit_code = max (exit_code, n)
+
+
+if options.interactive:
+ try:
+ import readline
+ except:
+ pass
+
+ def yes_prompt (question, default=False, retries=3):
+ d = {True: 'y', False: 'n'}.get (default, False)
+ while retries:
+ a = raw_input ('%s [default: %s]' % (question, d) + '\n')
+ if a.lower ().startswith ('y'):
+ return True
+ if a.lower ().startswith ('n'):
+ return False
+ if a == '' or retries < 0:
+ return default
+ stdout.write ("Please answer yes or no.\n")
+            retries -= 1
+        return default
+
+ def search_prompt ():
+ """Prompt user for a substring to look for in node names.
+
+If user input is empty or matches no node name, return None,
+otherwise return a list of (manual, node name, file) tuples.
+
+"""
+ substring = raw_input ("Enter a substring to search in node names \
+(press Enter to skip this x-ref):\n")
+ if not substring:
+ return None
+ substring = substring.lower ()
+ matches = []
+ for k in manuals:
+ matches += [(k, node, manuals[k]['nodes'][node][0])
+ for node in manuals[k]['nodes']
+ if substring in node.lower ()]
+ return matches
+
+else:
+ def yes_prompt (question, default=False, retries=3):
+ return default
+
+ def search_prompt ():
+ return None
+
+
+ref_re = re.compile \
+ ('@(ref|ruser|rlearning|rprogram|rglos)(?:\\{(?P<ref>[^,\\\\\\}]+?)|\
+named\\{(?P<refname>[^,\\\\]+?),(?P<display>[^,\\\\\\}]+?))\\}(?P<last>.)',
+ re.DOTALL)
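+# For illustration (hypothetical input): this matches plain x-refs such as
+# `@ruser{Pitches}.' as well as named ones such as
+# `@rusernamed{Pitches,the section on pitches}.'; the `last' group holds
+# the character following the closing brace.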
+node_include_re = re.compile (r'(?m)^@(node|include)\s+(.+?)$')
+
+whitespace_re = re.compile (r'\s+')
+line_start_re = re.compile ('(?m)^')
+
+def which_line (index, newline_indices):
+ """Calculate line number of a given string index
+
+Return the line number of string index `index', where
+newline_indices is an ordered iterable of all line start indices.
+"""
+ inf = 0
+ sup = len (newline_indices) - 1
+ n = len (newline_indices)
+ while inf + 1 != sup:
+ m = (inf + sup) / 2
+ if index >= newline_indices [m]:
+ inf = m
+ else:
+ sup = m
+ return inf + 1
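+# Example (hypothetical): which_line (5, [0, 3, 10]) == 2, since string
+# index 5 falls between the line-start indices 3 and 10, i.e. on line 2.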
+
+
+comments_re = re.compile ('(?<!@)(@c(?:omment)? \
+.*?\\n|^@ignore\\n.*?\\n@end ignore\\n)', re.M | re.S)
+
+def calc_comments_boundaries (texinfo_doc):
+ return [(m.start (), m.end ()) for m in comments_re.finditer (texinfo_doc)]
+
+
+def is_commented_out (start, end, comments_boundaries):
+ for k in range (len (comments_boundaries)):
+ if (start > comments_boundaries[k][0]
+ and end <= comments_boundaries[k][1]):
+ return True
+ elif end <= comments_boundaries[k][0]:
+ return False
+ return False
+
+
+def read_file (f, d):
+ s = open (f).read ()
+ base = os.path.basename (f)
+ dir = os.path.dirname (f)
+
+ d['contents'][f] = s
+
+ d['newline_indices'][f] = [m.end () for m in line_start_re.finditer (s)]
+ if options.check_comments:
+ d['comments_boundaries'][f] = []
+ else:
+ d['comments_boundaries'][f] = calc_comments_boundaries (s)
+
+ for m in node_include_re.finditer (s):
+ if m.group (1) == 'node':
+ line = which_line (m.start (), d['newline_indices'][f])
+ d['nodes'][m.group (2)] = (f, line)
+
+ elif m.group (1) == 'include':
+ try:
+ p = find_file (m.group (2), dir)
+ except EnvironmentError, (errno, strerror):
+ if strerror == file_not_found:
+ continue
+ else:
+ raise
+ read_file (p, d)
+
+
+def read_manual (name):
+ """Look for all node names and cross-references in a Texinfo document
+
+Return a (manual, dictionary) tuple where manual is the cross-reference
+macro name defined by references_dict[name], and dictionary
+has the following keys:
+
+ 'nodes' is a dictionary of `node name':(file name, line number),
+
+ 'contents' is a dictionary of file:`full file contents',
+
+ 'newline_indices' is a dictionary of
+file:[list of beginning-of-line string indices],
+
+ 'comments_boundaries' is a list of (start, end) tuples,
+which contain string indices of start and end of each comment.
+
+Included files that can be found in the include path are processed too.
+
+"""
+ d = {}
+ d['nodes'] = {}
+ d['contents'] = {}
+ d['newline_indices'] = {}
+ d['comments_boundaries'] = {}
+ manual = manuals_defs.references_dict.get (name, '')
+ try:
+ f = find_file (name + '.tely')
+ except EnvironmentError, (errno, strerror):
+ if not strerror == file_not_found:
+ raise
+ else:
+ try:
+ f = find_file (name + '.texi')
+ except EnvironmentError, (errno, strerror):
+ if strerror == file_not_found:
+ sys.stderr.write (name + '.{texi,tely}: ' +
+ file_not_found + '\n')
+ return (manual, d)
+ else:
+ raise
+
+ log.write ("Processing manual %s (%s)\n" % (f, manual))
+ read_file (f, d)
+ return (manual, d)
+
+
+log.write ("Reading files...\n")
+
+manuals = dict ([read_manual (name)
+ for name in manuals_defs.references_dict.keys ()])
+
+ref_fixes = set ()
+bad_refs_count = 0
+fixes_count = 0
+
+def add_fix (old_type, old_ref, new_type, new_ref):
+ ref_fixes.add ((old_type, old_ref, new_type, new_ref))
+
+
+def lookup_fix (r):
+ found = []
+ for (old_type, old_ref, new_type, new_ref) in ref_fixes:
+ if r == old_ref:
+ found.append ((new_type, new_ref))
+ return found
+
+
+def preserve_linebreak (text, linebroken):
+ if linebroken:
+ if ' ' in text:
+ text = text.replace (' ', '\n', 1)
+ n = ''
+ else:
+ n = '\n'
+ else:
+ n = ''
+ return (text, n)
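+# Example (hypothetical): preserve_linebreak ('Two words', True) returns
+# ('Two\nwords', ''), so a rewritten x-ref keeps its original line break.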
+
+
+def choose_in_numbered_list (message, string_list, sep=' ', retries=3):
+ S = set (string_list)
+ S.discard ('')
+ string_list = list (S)
+ numbered_list = sep.join ([str (j + 1) + '. ' + string_list[j]
+ for j in range (len (string_list))]) + '\n'
+ t = retries
+ while t > 0:
+ value = ''
+ stdout.write (message +
+ "(press Enter to discard and start a new search)\n")
+ input = raw_input (numbered_list)
+ if not input:
+ return ''
+ try:
+ value = string_list[int (input) - 1]
+ except IndexError:
+ stdout.write ("Error: index number out of range\n")
+ except ValueError:
+ matches = [input in v for v in string_list]
+ n = matches.count (True)
+ if n == 0:
+ stdout.write ("Error: input matches no item in the list\n")
+ elif n > 1:
+ stdout.write ("Error: ambiguous input (matches several items \
+in the list)\n")
+ else:
+ value = string_list[matches.index (True)]
+ if value:
+ return value
+ t -= 1
+ raise InteractionError ("%d retries limit exceeded" % retries)
+
+refs_count = 0
+
+def check_ref (manual, file, m):
+ global fixes_count, bad_refs_count, refs_count
+ refs_count += 1
+ bad_ref = False
+ fixed = True
+ type = m.group (1)
+ original_name = m.group ('ref') or m.group ('refname')
+ name = whitespace_re.sub (' ', original_name). strip ()
+ newline_indices = manuals[manual]['newline_indices'][file]
+ line = which_line (m.start (), newline_indices)
+ linebroken = '\n' in original_name
+ original_display_name = m.group ('display')
+ next_char = m.group ('last')
+ if original_display_name: # the xref has an explicit display name
+ display_linebroken = '\n' in original_display_name
+ display_name = whitespace_re.sub (' ', original_display_name). strip ()
+ commented_out = is_commented_out \
+ (m.start (), m.end (), manuals[manual]['comments_boundaries'][file])
+ useful_fix = not outdir in file
+
+    # check punctuation after x-ref
+ if options.check_punctuation and not next_char in '.,;:!?':
+ stdout.write ("Warning: %s: %d: `%s': x-ref \
+not followed by punctuation\n" % (file, line, name))
+
+ # validate xref
+ explicit_type = type
+ new_name = name
+
+ if type != 'ref' and type == manual and not commented_out:
+ if useful_fix:
+ fixed = False
+ bad_ref = True
+ stdout.write ("\n%s: %d: `%s': external %s x-ref should be internal\n"
+ % (file, line, name, type))
+ if options.auto_fix or yes_prompt ("Fix this?"):
+ type = 'ref'
+
+ if type == 'ref':
+ explicit_type = manual
+
+ if not name in manuals[explicit_type]['nodes'] and not commented_out:
+ bad_ref = True
+ fixed = False
+ stdout.write ('\n')
+ if type == 'ref':
+ stdout.write ("\e[1;31m%s: %d: `%s': wrong internal x-ref\e[0m\n"
+ % (file, line, name))
+ else:
+ stdout.write ("\e[1;31m%s: %d: `%s': wrong external `%s' x-ref\e[0m\n"
+ % (file, line, name, type))
+ # print context
+ stdout.write ('--\n' + manuals[manual]['contents'][file]
+ [newline_indices[max (0, line - 2)]:
+ newline_indices[min (line + 3,
+ len (newline_indices) - 1)]] +
+ '--\n')
+
+ # try to find the reference in other manuals
+ found = []
+ for k in [k for k in manuals if k != explicit_type]:
+ if name in manuals[k]['nodes']:
+ if k == manual:
+ found = ['ref']
+ stdout.write ("\e[1;32m found as internal x-ref\e[0m\n")
+ break
+ else:
+ found.append (k)
+ stdout.write ("\e[1;32m found as `%s' x-ref\e[0m\n" % k)
+
+ if (len (found) == 1
+ and (options.auto_fix or yes_prompt ("Fix this x-ref?"))):
+ add_fix (type, name, found[0], name)
+ type = found[0]
+ fixed = True
+
+ elif len (found) > 1 and useful_fix:
+ if options.interactive or options.auto_fix:
+ stdout.write ("* Several manuals contain this node name, \
+cannot determine manual automatically.\n")
+ if options.interactive:
+ t = choose_in_numbered_list ("Choose manual for this x-ref by \
+index number or beginning of name:\n", found)
+ if t:
+ add_fix (type, name, t, name)
+ type = t
+ fixed = True
+
+ if not fixed:
+ # try to find a fix already made
+ found = lookup_fix (name)
+
+ if len (found) == 1:
+ stdout.write ("Found one previous fix: %s `%s'\n" % found[0])
+ if options.auto_fix or yes_prompt ("Apply this fix?"):
+ type, new_name = found[0]
+ fixed = True
+
+ elif len (found) > 1:
+ if options.interactive or options.auto_fix:
+ stdout.write ("* Several previous fixes match \
+this node name, cannot fix automatically.\n")
+ if options.interactive:
+                concatenated = choose_in_numbered_list ("Choose new manual \
+and x-ref by index number or beginning of name:\n", [''.join ([i[0], ' ', i[1]])
+ for i in found],
+ sep='\n')
+                if concatenated:
+ type, new_name = concatenated.split (' ', 1)
+ fixed = True
+
+ if not fixed:
+ # all previous automatic fixing attempts failed,
+ # ask user for substring to look in node names
+ while True:
+ node_list = search_prompt ()
+ if node_list == None:
+ if options.interactive:
+ stdout.write (warn_not_fixed)
+ break
+ elif not node_list:
+ stdout.write ("No matched node names.\n")
+ else:
+ concatenated = choose_in_numbered_list ("Choose \
+node name and manual for this x-ref by index number or beginning of name:\n", \
+ [' '.join ([i[0], i[1], '(in %s)' % i[2]])
+ for i in node_list],
+ sep='\n')
+ if concatenated:
+ t, z = concatenated.split (' ', 1)
+ new_name = z.split (' (in ', 1)[0]
+ add_fix (type, name, t, new_name)
+ type = t
+ fixed = True
+ break
+
+ if fixed and type == manual:
+ type = 'ref'
+ bad_refs_count += int (bad_ref)
+ if bad_ref and not useful_fix:
+ stdout.write ("*** Warning: this file is automatically generated, \
+please fix the source code instead of the generated documentation.\n")
+
+ # compute returned string
+ if new_name == name:
+ if bad_ref and (options.interactive or options.auto_fix):
+ # only the type of the ref was fixed
+ fixes_count += int (fixed)
+ if original_display_name:
+ return ('@%snamed{%s,%s}' % (type, original_name, original_display_name)) + next_char
+ else:
+ return ('@%s{%s}' % (type, original_name)) + next_char
+ else:
+ fixes_count += int (fixed)
+ (ref, n) = preserve_linebreak (new_name, linebroken)
+ if original_display_name:
+ if bad_ref:
+ stdout.write ("Current display name is `%s'\n")
+ display_name = raw_input \
+ ("Enter a new display name or press enter to keep the existing name:\n") \
+ or display_name
+ (display_name, n) = preserve_linebreak (display_name, display_linebroken)
+ else:
+ display_name = original_display_name
+ return ('@%snamed{%s,%s}' % (type, ref, display_name)) + \
+ next_char + n
+ else:
+ return ('@%s{%s}' % (type, ref)) + next_char + n
+
+
+log.write ("Checking cross-references...\n")
+
+try:
+ for key in manuals:
+ for file in manuals[key]['contents']:
+ s = ref_re.sub (lambda m: check_ref (key, file, m),
+ manuals[key]['contents'][file])
+ if s != manuals[key]['contents'][file]:
+ open (file, 'w').write (s)
+except KeyboardInterrupt:
+ log.write ("Operation interrupted, exiting.\n")
+ sys.exit (2)
+except InteractionError, instance:
+ log.write ("Operation refused by user: %s\nExiting.\n" % instance)
+ sys.exit (3)
+
+log.write ("\e[1;36mDone: %d x-refs found, %d bad x-refs found, fixed %d.\e[0m\n" %
+ (refs_count, bad_refs_count, fixes_count))
--- /dev/null
+#!/usr/bin/env python
+
+import __main__
+import optparse
+import os
+import sys
+
+import langdefs
+import buildlib
+
+verbose = 0
+use_colors = False
+lang = 'C'
+C = lang
+
+def dir_lang (file, lang, lang_dir_index):
+ path_components = file.split ('/')
+ path_components[lang_dir_index] = lang
+ return os.path.join (*path_components)
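+# Example (hypothetical): dir_lang ('fr/user/lilypond.tely', 'de', 0)
+# returns 'de/user/lilypond.tely'.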
+
+def do_file (file_name, lang_codes, buildlib):
+ if verbose:
+ sys.stderr.write ('%s...\n' % file_name)
+ split_file_name = file_name.split ('/')
+ d1, d2 = split_file_name[0:2]
+ if d1 in lang_codes:
+ check_lang = d1
+ lang_dir_index = 0
+ elif d2 in lang_codes:
+ check_lang = d2
+ lang_dir_index = 1
+ else:
+ check_lang = lang
+ if check_lang == C:
+ raise Exception ('cannot determine language for ' + file_name)
+
+ original = dir_lang (file_name, '', lang_dir_index)
+ translated_contents = open (file_name).read ()
+ (diff_string, error) \
+ = buildlib.check_translated_doc (original,
+ file_name,
+ translated_contents,
+ color=use_colors and not update_mode)
+
+ if error:
+ sys.stderr.write ('warning: %s: %s' % (file_name, error))
+
+ if update_mode:
+ if error or len (diff_string) >= os.path.getsize (original):
+ buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + original)
+ elif diff_string:
+ diff_file = original + '.diff'
+ f = open (diff_file, 'w')
+ f.write (diff_string)
+ f.close ()
+ buildlib.read_pipe (text_editor + ' ' + file_name + ' ' + diff_file)
+ os.remove (diff_file)
+ else:
+ sys.stdout.write (diff_string)
+
+def usage ():
+ sys.stdout.write (r'''
+Usage:
+check-translation [--language=LANG] [--verbose] [--update] FILE...
+
+This script is licensed under the GNU GPL.
+''')
+
+def do_options ():
+ global lang, verbose, update_mode, use_colors
+
+ p = optparse.OptionParser (usage="check-translation [--language=LANG] [--verbose] FILE...",
+ description="This script is licensed under the GNU GPL.")
+ p.add_option ("--language",
+ action='store',
+ default='site',
+ dest="language")
+ p.add_option ("--no-color",
+ action='store_false',
+ default=True,
+ dest="color",
+ help="do not print ANSI-cooured output")
+ p.add_option ("--verbose",
+ action='store_true',
+ default=False,
+ dest="verbose",
+ help="print details, including executed shell commands")
+ p.add_option ('-u', "--update",
+ action='store_true',
+ default=False,
+ dest='update_mode',
+ help='call $EDITOR to update the translation')
+
+ (options, files) = p.parse_args ()
+ verbose = options.verbose
+ lang = options.language
+ use_colors = options.color
+ update_mode = options.update_mode
+
+ return files
+
+def main ():
+ global update_mode, text_editor
+
+ files = do_options ()
+ if 'EDITOR' in os.environ:
+ text_editor = os.environ['EDITOR']
+ else:
+ update_mode = False
+
+ buildlib.verbose = verbose
+
+ for i in files:
+ do_file (i, langdefs.LANGDICT.keys (), buildlib)
+
+if __name__ == '__main__':
+ main ()
--- /dev/null
+#!/usr/bin/env python
+
+import os
+import glob
+import re
+import sys
+import optparse
+
+#File 'accidental-engraver.cc'
+#Lines executed:87.70% of 252
+
+def summary (args):
+ results = []
+ for f in args:
+ str = open (f).read ()
+ m = re.search ("File '([^']+.cc)'\s*Lines executed:([0-9.]+)% of ([0-9]+)", str)
+
+ if m and '/usr/lib' in m.group (1):
+ continue
+
+ if m:
+ cov = float (m.group (2))
+ lines = int (m.group (3))
+ pain = lines * (100.0 - cov)
+ file = m.group (1)
+ tup = (pain, locals ().copy())
+
+ results.append(tup)
+
+ results.sort ()
+ results.reverse()
+
+ print 'files sorted by number of untested lines (decreasing)'
+ print
+ print '%5s (%6s): %s' % ('cov %', 'lines', 'file')
+ print '----------------------------------------------'
+
+ for (pain, d) in results:
+ print '%(cov)5.2f (%(lines)6d): %(file)s' % d
+
+class Chunk:
+ def __init__ (self, range, coverage_count, all_lines, file):
+ assert coverage_count >= 0
+ assert type (range) == type (())
+
+ self.coverage_count = coverage_count
+ self.range = range
+ self.all_lines = all_lines
+ self.file = file
+
+ def length (self):
+ return self.range[1] - self.range[0]
+
+ def text (self):
+ return ''.join ([l[2] for l in self.lines()])
+
+ def lines (self):
+ return self.all_lines[self.range[0]:
+ self.range[1]]
+ def widen (self):
+        # grow the range by one line on each side, clamping the start at 0
+        self.range = (max (self.range[0] - 1, 0),
+                      self.range[1] + 1)
+ def write (self):
+ print 'chunk in', self.file
+ for (c, n, l) in self.lines ():
+ cov = '%d' % c
+ if c == 0:
+ cov = '#######'
+ elif c < 0:
+ cov = ''
+ sys.stdout.write ('%8s:%8d:%s' % (cov, n, l))
+
+ def uncovered_score (self):
+ return self.length ()
+
+class SchemeChunk (Chunk):
+ def uncovered_score (self):
+ text = self.text ()
+ if (text.startswith ('(define ')
+ and not text.startswith ('(define (')):
+ return 0
+
+ if text.startswith ('(use-modules '):
+ return 0
+
+ if (text.startswith ('(define-public ')
+ and not text.startswith ('(define-public (')):
+ return 0
+
+ return len ([l for (c,n,l) in self.lines() if (c == 0)])
+
+def read_gcov (f):
+ ls = []
+
+ in_lines = [l for l in open (f).readlines ()]
+ (count_len, line_num_len) = tuple (map (len, in_lines[0].split (':')[:2]))
+
+ for l in in_lines:
+ c = l[:count_len].strip ()
+ l = l[count_len+1:]
+ n = int (l[:line_num_len].strip ())
+
+ if n == 0:
+ continue
+
+ if '#' in c:
+ c = 0
+ elif c == '-':
+ c = -1
+ else:
+ c = int (c)
+
+ l = l[line_num_len+1:]
+
+ ls.append ((c,n,l))
+
+ return ls
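+# Annotated gcov lines look like `     12:   34:foo ();': execution count,
+# line number, source text.  gcov prints `-' for lines with no code and
+# `#####' for lines never executed; read_gcov maps these to -1 and 0.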
+
+def get_c_chunks (ls, file):
+ chunks = []
+ chunk = []
+
+ last_c = -1
+ for (c, n, l) in ls:
+ if not (c == last_c or c < 0 and l != '}\n'):
+ if chunk and last_c >= 0:
+ nums = [n-1 for (n, l) in chunk]
+ chunks.append (Chunk ((min (nums), max (nums)+1),
+ last_c, ls, file))
+ chunk = []
+
+ chunk.append ((n,l))
+ if c >= 0:
+ last_c = c
+
+ return chunks
+
+def get_scm_chunks (ls, file):
+ chunks = []
+ chunk = []
+
+ def new_chunk ():
+ if chunk:
+ nums = [n-1 for (n, l) in chunk]
+ chunks.append (SchemeChunk ((min (nums), max (nums)+1),
+ max (last_c, 0), ls, file))
+ chunk[:] = []
+
+ last_c = -1
+ for (cov_count, line_number, line) in ls:
+ if line.startswith ('('):
+ new_chunk ()
+ last_c = -1
+
+ chunk.append ((line_number, line))
+ if cov_count >= 0:
+ last_c = cov_count
+
+ return chunks
+
+def widen_chunk (ch, ls):
+    (a, b) = ch.range
+    a -= 1
+    b += 1
+
+ return [(n, l) for (c, n, l) in ls[a:b]]
+
+
+def extract_chunks (file):
+ try:
+ ls = read_gcov (file)
+ except IOError, s :
+ print s
+ return []
+
+ cs = []
+ if 'scm' in file:
+ cs = get_scm_chunks (ls, file)
+ else:
+ cs = get_c_chunks (ls, file)
+ return cs
+
+
+def filter_uncovered (chunks):
+ def interesting (c):
+ if c.coverage_count > 0:
+ return False
+
+ t = c.text()
+ for stat in ('warning', 'error', 'print', 'scm_gc_mark'):
+ if stat in t:
+ return False
+ return True
+
+ return [c for c in chunks if interesting (c)]
+
+
+def main ():
+ p = optparse.OptionParser (usage="usage coverage.py [options] files",
+ description="")
+ p.add_option ("--summary",
+ action='store_true',
+ default=False,
+ dest="summary")
+
+ p.add_option ("--hotspots",
+ default=False,
+ action='store_true',
+ dest="hotspots")
+
+ p.add_option ("--uncovered",
+ default=False,
+ action='store_true',
+ dest="uncovered")
+
+
+ (options, args) = p.parse_args ()
+
+
+ if options.summary:
+ summary (['%s.gcov-summary' % s for s in args])
+
+ if options.uncovered or options.hotspots:
+ chunks = []
+ for a in args:
+ name = a
+ if name.endswith ('scm'):
+ name += '.cov'
+ else:
+ name += '.gcov'
+
+ chunks += extract_chunks (name)
+
+ if options.uncovered:
+ chunks = filter_uncovered (chunks)
+ chunks = [(c.uncovered_score (), c) for c in chunks if c.uncovered_score() > 0]
+ elif options.hotspots:
+ chunks = [((c.coverage_count, -c.length()), c) for c in chunks]
+
+
+ chunks.sort ()
+ chunks.reverse ()
+ for (score, c) in chunks:
+ c.write ()
+
+
+
+if __name__ == '__main__':
+ main ()
--- /dev/null
+#!/usr/bin/env python
+import sys
+import re
+import os
+
+
+full_paths = {}
+incs = {}
+inc_re = re.compile ('^#include "([^"]+)"')
+def parse_file (fn):
+ lst = []
+
+ lc = 0
+ for l in open (fn).readlines():
+ lc += 1
+ m = inc_re.search (l)
+ if m:
+ lst.append ((lc, m.group (1)))
+
+ base = os.path.split (fn)[1]
+ full_paths[base] = fn
+ incs[base] = lst
+
+
+def has_include (f, name):
+ try:
+ return name in [b for (a,b) in incs[f]]
+ except KeyError:
+ return False
+
+for a in sys.argv:
+ parse_file (a)
+
+print '-*-compilation-*-'
+for (f, lst) in incs.items ():
+ for (n, inc) in lst:
+ for (n2, inc2) in lst:
+ if has_include (inc2, inc):
+ print "%s:%d: already have %s from %s" % (full_paths[f], n,
+ inc, inc2)
+ break
+
+
+
--- /dev/null
+#!/usr/bin/env python
+
+# fixcc -- nitpick lily's c++ code
+
+# TODO
+# * maintainable rules: regexp's using whitespace (?x) and match names
+# <identifier>)
+# * trailing `*' vs. function definition
+# * do not break/change indentation of fixcc-clean files
+# * check lexer, parser
+# * rewrite in elisp, add to cc-mode
+# * using regexes is broken by design
+# * ?
+# * profit
+
+import __main__
+import getopt
+import os
+import re
+import string
+import sys
+import time
+
+COMMENT = 'COMMENT'
+STRING = 'STRING'
+GLOBAL_CXX = 'GC++'
+CXX = 'C++'
+verbose_p = 0
+indent_p = 0
+
+rules = {
+ GLOBAL_CXX:
+ [
+ # delete gratuitous block
+ ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
+ '\n\\2;'),
+ ],
+ CXX:
+ [
+ # space before parenthesis open
+ ('([^\( \]])[ \t]*\(', '\\1 ('),
+ # space after comma
+ ("\([^'],\)[ \t]*", '\1 '),
+ # delete gratuitous block
+ ('''\n( |\t)\s*{\n\s*(.*?)(?![{}]|\b(do|for|else|if|switch|while)\b);\n\s*}''',
+ '\n\\2;'),
+ # delete inline tabs
+ ('(\w)\t+', '\\1 '),
+ # delete inline double spaces
+        ('  *', ' '),
+ # delete space after parenthesis open
+ ('\([ \t]*', '('),
+ # delete space before parenthesis close
+ ('[ \t]*\)', ')'),
+ # delete spaces after prefix
+ ('(--|\+\+)[ \t]*([\w\)])', '\\1\\2'),
+ # delete spaces before postfix
+ ('([\w\)\]])[ \t]*(--|\+\+)', '\\1\\2'),
+ # delete space after parenthesis close
+ #('\)[ \t]*([^\w])', ')\\1'),
+ # delete space around operator
+ # ('([\w\(\)\]])([ \t]*)(::|\.)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
+ ('([\w\(\)\]])([ \t]*)(\.|->)([ \t]*)([\w\(\)])', '\\1\\3\\5'),
+ # delete space after operator
+ ('(::)([ \t]*)([\w\(\)])', '\\1\\3'),
+ # delete superflous space around operator
+ ('([\w\(\)\]])([ \t]+)(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&|\||\*)([ \t]+)([\w\(\)])', '\\1 \\3 \\5'),
+ # space around operator1
+ ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
+ # space around operator2
+ ('([\w\)\]]) *(&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|=|/|:|&|\||\*) ([^\w\s])', '\\1 \\2 \\3'),
+ # space around operator3
+ ('([^\w\s]) (&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|[^-]>|=|/|:|&|\||\*) *([\w\(])', '\\1 \\2 \\3'),
+ # space around operator4
+ ('([\w\(\)\]]) (\*|/|\+|-) *([-:])', '\\1 \\2 \\3'),
+ # space around +/-; exponent
+ ('([\w\)\]])(\+|-)([_A-Za-z\(])', '\\1 \\2 \\3'),
+ ('([_\dA-Za-df-z\)\]])(\+|-)([\w\(])', '\\1 \\2 \\3'),
+ # trailing operator
+ (' (::|&&|\|\||<=|>=|!=|\|=|==|\+=|-=|\*=|/=|\?|<|>|\+|-|=|/|:|&XXX|\||\*XXX)[ \t]*\n([ \t]*)', '\n\\2\\1 '),
+ # pointer
+ ##('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
+ ('(bool|char|const|delete|int|stream|unsigned|void|size_t|struct \w+|[A-Z]\w*|,|;|:|=|\?\)|&&|<|[^-]>|\|\||-|\+)[ \t]*(\*|&)[ \t]*', '\\1 \\2'),
+ #to#('(bool|char|const|delete|int|stream|unsigned|void|([A-Z]\w*)|[,])[ \n\t]*(\*|&)[ \t]*', '\\1 \\3'),
+ # pointer with template
+ ('(( *((bool|char|const|delete|int|stream|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*|\w+::\w+|[,])[ \*&],*)+)>) *(\*|&) *', '\\1 \\5'),
+ #to#('(( *((bool|char|delete|int|stream|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)|[,])[ \*&],*)+)>)[ \t\n]*(\*|&) *', '\\1 \\7'),
+ # unary pointer, minus, not
+ ('(return|=) (\*|&|-|!) ([\w\(])', '\\1 \\2\\3'),
+ # space after `operator'
+ ('(\Woperator) *([^\w\s])', '\\1 \\2'),
+ # dangling brace close
+ ('\n[ \t]*(\n[ \t]*})', '\\1'),
+ # dangling newline
+ ('\n[ \t]*\n[ \t]*\n', '\n\n'),
+ # dangling parenthesis open
+ #('[ \t]*\n[ \t]*\([ \t]*\n', '('),
+ ('\([ \t]*\n', '('),
+ # dangling parenthesis close
+ ('\n[ \t]*\)', ')'),
+ # dangling comma
+ ('\n[ \t]*,', ','),
+ # dangling semicolon
+ ('\n[ \t]*;', ';'),
+ # brace open
+ ('(\w)[ \t]*([^\s]*){([ \t]*\n)', '\\1\\2\n{\n'),
+ # brace open backslash
+ ('(\w[^\n]*){[ \t]*\\\\\n', '\\1\\\n{\\\n'),
+ # brace close
+ ("}[ \t]*([^'\n]*\w[^\n\\\]*)\n", '}\n\\1\n'),
+ # brace close backslash
+ ("}[ \t]*([^'\n]*\w[^\n\\\]*)", '\n}\n\\1'),
+ # delete space after `operator'
+ #('(\Woperator) (\W)', '\\1\\2'),
+ # delete space after case, label
+ ('(\W(case|label) ([\w]+)) :', '\\1:'),
+ # delete space before comma
+ ('[ \t]*,', ','),
+ # delete space before semicolon
+ ('[ \t]*;', ';'),
+ # delete space before eol-backslash
+ ('[ \t]*\\\\\n', '\\\n'),
+ # delete trailing whitespace
+ ('[ \t]*\n', '\n'),
+
+ ## Deuglify code that also gets ugly by rules above.
+ # delete newline after typedef struct
+ ('(typedef struct\s+([\w]*\s){([^}]|{[^}]*})*})\s*\n\s*(\w[\w\d]*;)', '\\1 \\4'),
+ # delete spaces around template brackets
+ #('(dynamic_cast|template|([A-Z]\w*))[ \t]*<[ \t]*(( *(bool|char|int|unsigned|void|(class[ \t]+\w*)|([A-Z]\w*)),?)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\3\\8>'),
+ ('(dynamic_cast|template|typedef|\w+::\w+|[A-Z]\w*)[ \t]*<[ \t]*(( *(bool|char|const|int|unsigned|void|size_t|class[ \t]+\w*|[A-Z]\w*)( *[\*&]?,|[\*&])*)+)[ \t]?(| [\*&])[ \t]*>', '\\1<\\2\\6>'),
+ ('(\w+::\w+|[A-Z]\w*) < ((\w+::\w+|[A-Z]\w*)<[A-Z]\w*>) >', '\\1<\\2 >'),
+ ('((if|while)\s+\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\n;'),
+ ('(for\s+\(([^;]*;[^;]*;([^\)]|\([^\)]*\))*)\))\s*;', '\\1\n;'),
+ # do {..} while
+ ('(}\s*while\s*)(\(([^\)]|\([^\)]*\))*\))\s*;', '\\1\\2;'),
+
+ ## Fix code that gets broken by rules above.
+ ##('->\s+\*', '->*'),
+ # delete space before #define x()
+ ('#[ \t]*define (\w*)[ \t]*\(', '#define \\1('),
+ # add space in #define x ()
+ ('#[ \t]*define (\w*)(\(([^\(\)]|\([^\(\)]*\))*\)\\n)',
+ '#define \\1 \\2'),
+ # delete space in #include <>
+ ('#[ \t]*include[ \t]*<[ \t]*([^ \t>]*)[ \t]*(/?)[ \t]*([^ \t>]*)[ \t]*>',
+ '#include <\\1\\2\\3>'),
+ # delete backslash before empty line (emacs' indent region is broken)
+ ('\\\\\n\n', '\n\n'),
+ ],
+
+ COMMENT:
+ [
+ # delete trailing whitespace
+ ('[ \t]*\n', '\n'),
+ # delete empty first lines
+ ('(/\*\n)\n*', '\\1'),
+ # delete empty last lines
+ ('\n*(\n\*/)', '\\1'),
+ ## delete newline after start?
+ #('/(\*)\n', '\\1'),
+ ## delete newline before end?
+ #('\n(\*/)', '\\1'),
+ ],
+ }
+
+# Recognize special sequences in the input.
+#
+# (?P<name>regex) -- Assign result of REGEX to NAME.
+# *? -- Match non-greedily.
+# (?m) -- Multiline regex: Make ^ and $ match at each line.
+# (?s) -- Make the dot match all characters including newline.
+# (?x) -- Ignore whitespace in patterns.
+no_match = 'a\ba'
+snippet_res = {
+ CXX: {
+ 'multiline_comment':
+ r'''(?sx)
+ (?P<match>
+ (?P<code>
+ [ \t]*/\*.*?\*/))''',
+
+ 'singleline_comment':
+ r'''(?mx)
+ ^.*
+ (?P<match>
+ (?P<code>
+ [ \t]*//([ \t][^\n]*|)\n))''',
+
+ 'string':
+ r'''(?x)
+ (?P<match>
+ (?P<code>
+ "([^\"\n](\")*)*"))''',
+
+ 'char':
+ r'''(?x)
+ (?P<match>
+ (?P<code>
+ '([^']+|\')))''',
+
+ 'include':
+ r'''(?x)
+ (?P<match>
+ (?P<code>
+ "#[ \t]*include[ \t]*<[^>]*>''',
+ },
+ }
+
+class Chunk:
+ def replacement_text (self):
+ return ''
+
+ def filter_text (self):
+ return self.replacement_text ()
+
+class Substring (Chunk):
+ def __init__ (self, source, start, end):
+ self.source = source
+ self.start = start
+ self.end = end
+
+ def replacement_text (self):
+ s = self.source[self.start:self.end]
+ if verbose_p:
+ sys.stderr.write ('CXX Rules')
+ for i in rules[CXX]:
+ if verbose_p:
+ sys.stderr.write ('.')
+ #sys.stderr.write ('\n\n***********\n')
+ #sys.stderr.write (i[0])
+ #sys.stderr.write ('\n***********\n')
+ #sys.stderr.write ('\n=========>>\n')
+ #sys.stderr.write (s)
+ #sys.stderr.write ('\n<<=========\n')
+ s = re.sub (i[0], i[1], s)
+ if verbose_p:
+ sys.stderr.write ('done\n')
+ return s
+
+
+class Snippet (Chunk):
+ def __init__ (self, type, match, format):
+ self.type = type
+ self.match = match
+ self.hash = 0
+ self.options = []
+ self.format = format
+
+ def replacement_text (self):
+ return self.match.group ('match')
+
+ def substring (self, s):
+ return self.match.group (s)
+
+ def __repr__ (self):
+ return `self.__class__` + ' type = ' + self.type
+
+class Multiline_comment (Snippet):
+ def __init__ (self, source, match, format):
+        self.type = 'multiline_comment'
+ self.match = match
+ self.hash = 0
+ self.options = []
+ self.format = format
+
+ def replacement_text (self):
+ s = self.match.group ('match')
+ if verbose_p:
+ sys.stderr.write ('COMMENT Rules')
+ for i in rules[COMMENT]:
+ if verbose_p:
+ sys.stderr.write ('.')
+ s = re.sub (i[0], i[1], s)
+ return s
+
+snippet_type_to_class = {
+ 'multiline_comment': Multiline_comment,
+# 'string': Multiline_comment,
+# 'include': Include_snippet,
+}
+
+def find_toplevel_snippets (s, types):
+ if verbose_p:
+ sys.stderr.write ('Dissecting')
+
+ res = {}
+ for i in types:
+ res[i] = re.compile (snippet_res[format][i])
+
+ snippets = []
+ index = 0
+ ## found = dict (map (lambda x: (x, None),
+ ## types))
+ ## urg python2.1
+ found = {}
+ map (lambda x, f = found: f.setdefault (x, None),
+ types)
+
+ # We want to search for multiple regexes, without searching
+ # the string multiple times for one regex.
+ # Hence, we use earlier results to limit the string portion
+ # where we search.
+ # Since every part of the string is traversed at most once for
+ # every type of snippet, this is linear.
+
+ while 1:
+ if verbose_p:
+ sys.stderr.write ('.')
+ first = None
+ endex = 1 << 30
+ for type in types:
+ if not found[type] or found[type][0] < index:
+ found[type] = None
+ m = res[type].search (s[index:endex])
+ if not m:
+ continue
+
+ cl = Snippet
+ if snippet_type_to_class.has_key (type):
+ cl = snippet_type_to_class[type]
+ snip = cl (type, m, format)
+ start = index + m.start ('match')
+ found[type] = (start, snip)
+
+ if found[type] \
+ and (not first \
+ or found[type][0] < found[first][0]):
+ first = type
+
+ # FIXME.
+
+ # Limiting the search space is a cute
+ # idea, but this *requires* to search
+ # for possible containing blocks
+ # first, at least as long as we do not
+ # search for the start of blocks, but
+ # always/directly for the entire
+ # @block ... @end block.
+
+ endex = found[first][0]
+
+ if not first:
+ snippets.append (Substring (s, index, len (s)))
+ break
+
+ (start, snip) = found[first]
+ snippets.append (Substring (s, index, start))
+ snippets.append (snip)
+ found[first] = None
+ index = start + len (snip.match.group ('match'))
+
+ return snippets
+
+def nitpick_file (outdir, file):
+ s = open (file).read ()
+
+ for i in rules[GLOBAL_CXX]:
+ s = re.sub (i[0], i[1], s)
+
+ # FIXME: Containing blocks must be first, see
+ # find_toplevel_snippets.
+ # We leave simple strings be part of the code
+ snippet_types = (
+ 'multiline_comment',
+ 'singleline_comment',
+ 'string',
+# 'char',
+ )
+
+ chunks = find_toplevel_snippets (s, snippet_types)
+ #code = filter (lambda x: is_derived_class (x.__class__, Substring),
+ # chunks)
+
+ t = string.join (map (lambda x: x.filter_text (), chunks), '')
+ fixt = file
+ if s != t:
+ if not outdir:
+ os.system ('mv %s %s~' % (file, file))
+ else:
+ fixt = os.path.join (outdir,
+ os.path.basename (file))
+ h = open (fixt, "w")
+ h.write (t)
+ h.close ()
+ if s != t or indent_p:
+ indent_file (fixt)
+
+def indent_file (file):
+ emacs = '''emacs\
+ --no-window-system\
+ --batch\
+ --no-site-file\
+ --no-init-file\
+ %(file)s\
+ --eval '(let ((error nil)
+ (version-control nil))
+ (load-library "cc-mode")
+ (c++-mode)
+ (indent-region (point-min) (point-max))
+ (if (buffer-modified-p (current-buffer))
+ (save-buffer)))' ''' % vars ()
+ emacsclient = '''emacsclient\
+ --socket-name=%(socketdir)s/%(socketname)s\
+ --no-wait\
+ --eval '(let ((error nil)
+ (version-control nil))
+ (load-library "cc-mode")
+ (find-file "%(file)s")
+ (c++-mode)
+ (indent-region (point-min) (point-max))
+ (if (buffer-modified-p (current-buffer))
+ (save-buffer)))' ''' \
+ % { 'file': file,
+ 'socketdir' : socketdir,
+ 'socketname' : socketname, }
+ if verbose_p:
+ sys.stderr.write (emacs)
+ sys.stderr.write ('\n')
+ os.system (emacs)
+
+
+def usage ():
+ sys.stdout.write (r'''
+Usage:
+fixcc [OPTION]... FILE...
+
+Options:
+ --help
+ --indent reindent, even if no changes
+ --verbose
+ --test
+
+Typical use with LilyPond:
+
+ fixcc $(find flower kpath-guile lily -name '*cc' -o -name '*hh' | grep -v /out)
+
+This script is licensed under the GNU GPL
+''')
+
+def do_options ():
+ global indent_p, outdir, verbose_p
+ (options, files) = getopt.getopt (sys.argv[1:], '',
+ ['help', 'indent', 'outdir=',
+ 'test', 'verbose'])
+ for (o, a) in options:
+ if o == '--help':
+ usage ()
+ sys.exit (0)
+ elif o == '--indent':
+ indent_p = 1
+ elif o == '--outdir':
+ outdir = a
+ elif o == '--verbose':
+ verbose_p = 1
+ elif o == '--test':
+ test ()
+ sys.exit (0)
+ else:
+ assert unimplemented
+ if not files:
+ usage ()
+ sys.exit (2)
+ return files
+
+
+outdir = 0
+format = CXX
+socketdir = '/tmp/fixcc'
+socketname = 'fixcc%d' % os.getpid ()
+
+def setup_client ():
+ #--no-window-system\
+ #--batch\
+ os.unlink (os.path.join (socketdir, socketname))
+ os.mkdir (socketdir, 0700)
+ emacs='''emacs\
+ --no-site-file\
+ --no-init-file\
+ --eval '(let ((error nil)
+ (version-control nil))
+ (load-library "server")
+ (setq server-socket-dir "%(socketdir)s")
+ (setq server-name "%(socketname)s")
+ (server-start)
+ (while t) (sleep 1000))' ''' \
+ % { 'socketdir' : socketdir,
+ 'socketname' : socketname, }
+
+ if not os.fork ():
+ os.system (emacs)
+ sys.exit (0)
+ while not os.path.exists (os.path.join (socketdir, socketname)):
+ time.sleep (1)
+
+def main ():
+ #emacsclient should be faster, but this does not work yet
+ #setup_client ()
+ files = do_options ()
+ if outdir and not os.path.isdir (outdir):
+ os.makedirs (outdir)
+ for i in files:
+ sys.stderr.write ('%s...\n' % i)
+ nitpick_file (outdir, i)
+
+
+## TODO: make this compilable and check with g++
+TEST = '''
+#include <libio.h>
+#include <map>
+class
+ostream ;
+
+class Foo {
+public: static char* foo ();
+std::map<char*,int>* bar (char, char) { return 0; }
+};
+typedef struct
+{
+ Foo **bar;
+} String;
+
+ostream &
+operator << (ostream & os, String d);
+
+typedef struct _t_ligature
+{
+ char *succ, *lig;
+ struct _t_ligature * next;
+} AFM_Ligature;
+
+typedef std::map < AFM_Ligature const *, int > Bar;
+
+ /**
+ (c) 1997--2008 Han-Wen Nienhuys <hanwen@cs.uu.nl>
+ */
+
+/* ||
+* vv
+* !OK OK
+*/
+/* ||
+ vv
+ !OK OK
+*/
+char *
+Foo:: foo ()
+{
+int
+i
+;
+ char* a= &++ i ;
+ a [*++ a] = (char*) foe (*i, &bar) *
+ 2;
+ int operator double ();
+ std::map<char*,int> y =*bar(-*a ,*b);
+ Interval_t<T> & operator*= (T r);
+ Foo<T>*c;
+ int compare (Pqueue_ent < K, T > const& e1, Pqueue_ent < K,T> *e2);
+ delete *p;
+ if (abs (f)*2 > abs (d) *FUDGE)
+ ;
+ while (0);
+ for (; i<x foo(); foo>bar);
+ for (; *p && > y;
+ foo > bar)
+;
+ do {
+ ;;;
+ }
+ while (foe);
+
+ squiggle. extent;
+ 1 && * unsmob_moment (lf);
+ line_spanner_ = make_spanner ("DynamicLineSpanner", rq ? rq->*self_scm
+(): SCM_EOL);
+ case foo: k;
+
+ if (0) {a=b;} else {
+ c=d;
+ }
+
+ cookie_io_functions_t Memory_out_stream::functions_ = {
+ Memory_out_stream::reader,
+ ...
+ };
+
+ int compare (Array < Pitch> *, Array < Pitch> *);
+ original_ = (Grob *) & s;
+ Drul_array< Link_array<Grob> > o;
+}
+
+ header_.char_info_pos = (6 + header_length) * 4;
+ return ly_bool2scm (*ma < * mb);
+
+ 1 *::sign(2);
+
+ (shift) *-d;
+
+ a = 0 ? *x : *y;
+
+a = "foo() 2,2,4";
+{
+ if (!span_)
+ {
+ span_ = make_spanner ("StaffSymbol", SCM_EOL);
+ }
+}
+{
+ if (!span_)
+ {
+ span_ = make_spanner (StaffSymbol, SCM_EOL);
+ }
+}
+'''
+
+def test ():
+ test_file = 'fixcc.cc'
+ open (test_file, 'w').write (TEST)
+ nitpick_file (outdir, test_file)
+ sys.stdout.write (open (test_file).read ())
+
+if __name__ == '__main__':
+ main ()
+
--- /dev/null
+#!/usr/bin/env python
+
+import sys
+import os
+import glob
+import re
+
+USAGE = ''' Usage: makelsr.py LSR_SNIPPETS_DIR
+This script must be run from the top of the source tree;
+it updates the snippets in input/lsr with those from input/new or LSR_SNIPPETS_DIR.
+'''
+
+LY_HEADER_LSR = '''%% Do not edit this file; it is auto-generated from LSR http://lsr.dsi.unimi.it
+%% This file is in the public domain.
+'''
+
+LY_HEADER_NEW = '''%% Do not edit this file; it is auto-generated from input/new
+%% This file is in the public domain.
+'''
+
+DEST = os.path.join ('input', 'lsr')
+NEW_LYS = os.path.join ('input', 'new')
+TEXIDOCS = os.path.join ('input', 'texidocs')
+
+TAGS = []
+# NR 1
+TAGS.extend (['pitches', 'rhythms', 'expressive-marks',
+'repeats', 'simultaneous-notes', 'staff-notation',
+'editorial-annotations', 'text'])
+# NR 2
+TAGS.extend (['vocal-music', 'chords', 'keyboards',
+'percussion', 'fretted-strings', 'unfretted-strings',
+'ancient-notation', 'winds', 'world-music'
+])
+
+# other
+TAGS.extend (['contexts-and-engravers', 'tweaks-and-overrides',
+'paper-and-layout', 'breaks', 'spacing', 'midi', 'titles', 'template'])
+
+def exit_with_usage (n=0):
+ sys.stderr.write (USAGE)
+ sys.exit (n)
+
+try:
+ in_dir = sys.argv[1]
+except:
+ exit_with_usage (2)
+
+if not (os.path.isdir (DEST) and os.path.isdir (NEW_LYS)):
+ exit_with_usage (3)
+
+unsafe = []
+unconverted = []
+notags_files = []
+
+# mark the section that will be printed verbatim by lilypond-book
+end_header_re = re.compile (r'(\\header {.+?doctitle = ".+?})\n', re.M | re.S)
+
+def mark_verbatim_section (ly_code):
+ return end_header_re.sub ('\\1 % begin verbatim\n\n', ly_code, 1)
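+# Example (hypothetical input): a header line ending in
+# `doctitle = "Title" }' becomes `doctitle = "Title" } % begin verbatim',
+# so lilypond-book prints everything after the header verbatim.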
+
+# '% LSR' comments are to be stripped
+lsr_comment_re = re.compile (r'\s*%+\s*LSR.*')
+
+begin_header_re = re.compile (r'\\header\s*{', re.M)
+
+# add tags to ly files from LSR
+def add_tags (ly_code, tags):
+ return begin_header_re.sub ('\\g<0>\n lsrtags = "' + tags + '"\n', ly_code, 1)
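+# Example (hypothetical): add_tags (s, 'pitches, rhythms') rewrites the
+# first `\header {' as `\header {' followed by
+# `  lsrtags = "pitches, rhythms"' on the next line.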
+
+def copy_ly (srcdir, name, tags):
+ global unsafe
+ global unconverted
+ dest = os.path.join (DEST, name)
+ tags = ', '.join (tags)
+ s = open (os.path.join (srcdir, name)).read ()
+
+ texidoc_translations_path = os.path.join (TEXIDOCS,
+ os.path.splitext (name)[0] + '.texidoc')
+ if os.path.exists (texidoc_translations_path):
+ texidoc_translations = open (texidoc_translations_path).read ()
+ # Since we want to insert the translations verbatim using a
+ # regexp, \\ is understood as ONE escaped backslash. So we have
+ # to escape those backslashes once more...
+ texidoc_translations = texidoc_translations.replace ('\\', '\\\\')
+ s = begin_header_re.sub ('\\g<0>\n' + texidoc_translations, s, 1)
+
+ if in_dir in srcdir:
+ s = LY_HEADER_LSR + add_tags (s, tags)
+ else:
+ s = LY_HEADER_NEW + s
+
+ s = mark_verbatim_section (s)
+ s = lsr_comment_re.sub ('', s)
+ open (dest, 'w').write (s)
+
+ e = os.system ("convert-ly -e '%s'" % dest)
+ if e:
+ unconverted.append (dest)
+ if os.path.exists (dest + '~'):
+ os.remove (dest + '~')
+ # -V seems to make unsafe snippets fail nicer/sooner
+ e = os.system ("lilypond -V -dno-print-pages -dsafe -o /tmp/lsrtest '%s'" % dest)
+ if e:
+ unsafe.append (dest)
+
+def read_source_with_dirs (src):
+ s = {}
+ l = {}
+ for tag in TAGS:
+ srcdir = os.path.join (src, tag)
+ l[tag] = set (map (os.path.basename, glob.glob (os.path.join (srcdir, '*.ly'))))
+ for f in l[tag]:
+ if f in s:
+ s[f][1].append (tag)
+ else:
+ s[f] = (srcdir, [tag])
+ return s, l
+
+
+tags_re = re.compile ('lsrtags\\s*=\\s*"(.+?)"')
+
+def read_source (src):
+ s = {}
+ l = dict ([(tag, set()) for tag in TAGS])
+ for f in glob.glob (os.path.join (src, '*.ly')):
+ basename = os.path.basename (f)
+ m = tags_re.search (open (f, 'r').read ())
+ if m:
+ file_tags = [tag.strip() for tag in m.group (1). split(',')]
+ s[basename] = (src, file_tags)
+ [l[tag].add (basename) for tag in file_tags if tag in TAGS]
+ else:
+ notags_files.append (f)
+ return s, l
+
+
+def dump_file_list (file, list):
+ f = open (file, 'w')
+ f.write ('\n'.join (list) + '\n')
+
+## clean out existing lys and generated files
+map (os.remove, glob.glob (os.path.join (DEST, '*.ly')) +
+ glob.glob (os.path.join (DEST, '*.snippet-list')))
+
+# read LSR source where tags are defined by subdirs
+snippets, tag_lists = read_source_with_dirs (in_dir)
+# read input/new where tags are directly
+s, l = read_source (NEW_LYS)
+snippets.update (s)
+for t in TAGS:
+ tag_lists[t].update (l[t])
+
+for (name, (srcdir, tags)) in snippets.items ():
+ copy_ly (srcdir, name, tags)
+
+for (tag, file_set) in tag_lists.items ():
+ dump_file_list (os.path.join (DEST, tag + '.snippet-list'), sorted(file_set))
+
+if unconverted:
+ sys.stderr.write ('These files could not be converted successfully by convert-ly:\n')
+ sys.stderr.write ('\n'.join (unconverted) + '\n\n')
+
+if notags_files:
+ sys.stderr.write ('No tags could be found in these files:\n')
+ sys.stderr.write ('\n'.join (notags_files) + '\n\n')
+
+dump_file_list ('lsr-unsafe.txt', unsafe)
+sys.stderr.write ('''
+
+Unsafe files printed in lsr-unsafe.txt: CHECK MANUALLY!
+ git add input/lsr/*.ly
+ xargs git-diff HEAD < lsr-unsafe.txt
+
+''')
+
--- /dev/null
+#!/usr/bin/env python
+
+notes = "CDEFGAB"
+alterations = [-1, 0, 1]
+
+def print_note (octave, note, alteration):
+ print " <note>\n <pitch>\n <step>%s</step>" % notes[note]
+    if alteration != 0:
+ print " <alter>%s</alter>" % alteration
+ print " <octave>%s</octave>\n </pitch>\n <duration>1</duration>\n <voice>1</voice>\n <type>quarter</type>\n </note>" % octave
+
+
+print """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
+ "http://www.musicxml.org/dtds/partwise.dtd">
+<score-partwise>
+  <movement-title>Various pitches and interval sizes</movement-title>
+ <part-list>
+ <score-part id="P1">
+ <part-name>MusicXML Part</part-name>
+ </score-part>
+ </part-list>
+ <!--=========================================================-->
+ <part id="P1">
+ <measure number="1">
+ <attributes>
+ <divisions>1</divisions>
+ <key>
+ <fifths>0</fifths>
+ <mode>major</mode>
+ </key>
+ <time symbol="common">
+ <beats>2</beats>
+ <beat-type>4</beat-type>
+ </time>
+ <clef>
+ <sign>G</sign>
+ <line>2</line>
+ </clef>
+ </attributes>
+"""
+
+start_octave = 5
+
+for octave in (start_octave, start_octave+1):
+ for note in (0,1,2,3,4,5,6):
+ for alteration in alterations:
+ if octave == start_octave and note == 0 and alteration == -1:
+ continue
+ print_note (octave, note, alteration)
+# if octave == start_octave and note == 0 and alteration == 0:
+# continue
+ print_note (start_octave-(octave-start_octave)-(1-(7-note)/7), (7-note)%7, -alteration)
+
+print """ </measure>
+ </part>
+</score-partwise>
+"""
--- /dev/null
+#!/usr/bin/env python
+
+notes = "CDEFGAB"
+alterations = [-1, 0, 1]
+
+def print_measure (nr, fifth, mode, atts1 = "", atts = "", final = ""):
+ print """ <measure number="%s">
+ <attributes>
+%s <key>
+ <fifths>%s</fifths>
+ <mode>%s</mode>
+ </key>
+%s </attributes>
+ <note>
+ <pitch>
+ <step>C</step>
+ <octave>4</octave>
+ </pitch>
+ <duration>2</duration>
+ <voice>1</voice>
+ <type>half</type>
+ </note>
+%s </measure>""" % (nr, atts1, fifth, mode, atts, final)
+
+first_div = """ <divisions>1</divisions>
+"""
+first_atts = """ <time symbol="common">
+ <beats>2</beats>
+ <beat-type>4</beat-type>
+ </time>
+ <clef>
+ <sign>G</sign>
+ <line>2</line>
+ </clef>
+"""
+
+final_barline = """ <barline location="right">
+ <bar-style>light-heavy</bar-style>
+ </barline>
+"""
+
+print """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
+ "http://www.musicxml.org/dtds/partwise.dtd">
+<score-partwise>
+ <movement-title>Different Key signatures</movement-title>
+ <identification>
+ <miscellaneous>
+      <miscellaneous-field name="description">Various key signatures: from 11
+        flats to 11 sharps (each one measure in major, then one
+        measure in minor)</miscellaneous-field>
+ </miscellaneous>
+ </identification>
+ <part-list>
+ <score-part id="P1">
+ <part-name>MusicXML Part</part-name>
+ </score-part>
+ </part-list>
+ <!--=========================================================-->
+ <part id="P1">"""
+
+max_range = 11
+measure = 0
+for fifth in range(-max_range, max_range+1):
+ measure += 1
+ if fifth == -max_range:
+ print_measure (measure, fifth, "major", first_div, first_atts)
+ else:
+ print_measure (measure, fifth, "major")
+ measure += 1
+ if fifth == max_range:
+ print_measure (measure, fifth, "minor", "", "", final_barline)
+ else:
+ print_measure (measure, fifth, "minor")
+
+
+print """ </part>
+</score-partwise>"""
--- /dev/null
+#!/usr/bin/env python
+
+notes = "CDEFGAB"
+alterations = [-1, 0, 1]
+
+dot_xml = """ <dot/>
+"""
+tie_xml = """ <tie type="%s"/>
+"""
+tie_notation_xml = """ <notations><tied type="%s"/></notations>
+"""
+
+
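+# Build MusicXML for a note lasting `duration' eighth-note units:
+# choose the largest plain note type that fits, lengthen it with up
+# to two dots, and tie a recursively generated note to any remainder.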
+def generate_note (duration, end_tie = False):
+ if duration < 2:
+ (notetype, dur) = ("8th", 1)
+ elif duration < 4:
+ (notetype, dur) = ("quarter", 2)
+ elif duration < 8:
+ (notetype, dur) = ("half", 4)
+ else:
+ (notetype, dur) = ("whole", 8)
+ dur_processed = dur
+ dot = ""
+    if (duration - dur_processed >= max (dur/2, 1)):
+        dot = dot_xml
+        dur_processed += dur/2
+        # a second dot is only possible on top of the first one
+        if (duration - dur_processed >= max (dur/4, 1)):
+            dot += dot_xml
+            dur_processed += dur/4
+ tie = ""
+ tie_notation = ""
+ if end_tie:
+ tie += tie_xml % "stop"
+ tie_notation += tie_notation_xml % "stop"
+ second_note = None
+ if duration - dur_processed > 0:
+ second_note = generate_note (duration-dur_processed, True)
+ tie += tie_xml % "start"
+ tie_notation += tie_notation_xml % "start"
+ note = """ <note>
+ <pitch>
+ <step>C</step>
+ <octave>5</octave>
+ </pitch>
+ <duration>%s</duration>
+%s <voice>1</voice>
+ <type>%s</type>
+%s%s </note>""" % (dur_processed, tie, notetype, dot, tie_notation)
+ if second_note:
+ return "%s\n%s" % (note, second_note)
+ else:
+ return note
+
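+# Emit one measure in the given time signature, filled by a single
+# (possibly dotted or tied) note; with <divisions>2</divisions>, a
+# measure of beats/type spans 8*beats/type eighth-note units.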
+def print_measure (nr, beats, type, params = "", attr = "", attr2 = "", barline = ""):
+ duration = 8*beats/type
+ note = generate_note (duration)
+
+ print """ <measure number="%s">
+ <attributes>
+%s <time%s>
+ <beats>%s</beats>
+ <beat-type>%s</beat-type>
+ </time>
+%s </attributes>
+%s
+%s </measure>""" % (nr, attr, params, beats, type, attr2, note, barline)
+
+first_key = """ <divisions>2</divisions>
+ <key>
+ <fifths>0</fifths>
+ <mode>major</mode>
+ </key>
+"""
+first_clef = """ <clef>
+ <sign>G</sign>
+ <line>2</line>
+ </clef>
+"""
+
+final_barline = """ <barline location="right">
+ <bar-style>light-heavy</bar-style>
+ </barline>
+"""
+
+print """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 1.0 Partwise//EN"
+ "http://www.musicxml.org/dtds/partwise.dtd">
+<score-partwise>
+ <identification>
+ <miscellaneous>
+ <miscellaneous-field name="description">Various time signatures: 2/2
+ (alla breve), 4/4 (C), 2/2, 3/2, 2/4, 3/4, 4/4, 5/4, 3/8, 6/8,
+ 12/8</miscellaneous-field>
+ </miscellaneous>
+ </identification>
+ <part-list>
+ <score-part id="P1">
+ <part-name>MusicXML Part</part-name>
+ </score-part>
+ </part-list>
+ <!--=========================================================-->
+ <part id="P1">"""
+
+measure = 1
+
+print_measure (measure, 2, 2, " symbol=\"cut\"", first_key, first_clef)
+measure += 1
+
+print_measure (measure, 4, 4, " symbol=\"common\"")
+measure += 1
+
+print_measure (measure, 2, 2)
+measure += 1
+
+print_measure (measure, 3, 2)
+measure += 1
+
+print_measure (measure, 2, 4)
+measure += 1
+
+print_measure (measure, 3, 4)
+measure += 1
+
+print_measure (measure, 4, 4)
+measure += 1
+
+print_measure (measure, 5, 4)
+measure += 1
+
+print_measure (measure, 3, 8)
+measure += 1
+
+print_measure (measure, 6, 8)
+measure += 1
+
+print_measure (measure, 12, 8, "", "", "", final_barline)
+measure += 1
+
+print """ </part>
+</score-partwise>"""
--- /dev/null
+Open($1);
+MergeKern($2)
+
+
+# The AFM files of `New Century Schoolbook' family as distributed within the
+# urw-fonts-1.0.7pre41.tar.bz2 archive contain a bunch of ligatures which
+# shouldn't be active by default:
+#
+# T + M -> trademark
+# N + o -> afii61352
+# i + j -> ij
+# I + J -> IJ
+#
+# This font bundle is shipped by Fedora Core 6 and other GNU/Linux
+# distributions; we simply remove those ligatures.
+
+SelectIf("trademark", "trademark", \
+ "afii61352", "afii61352", \
+ "ij", "ij", \
+ "IJ", "IJ");
+if (Strtol($version) < 20070501)
+ RemoveATT("Ligature", "*", "*");
+else
+ RemovePosSub("*");
+endif
+
+Generate($3 + $fontname + ".otf");
+
+# EOF
--- /dev/null
+#!/usr/bin/env python
+import os
+import sys
+
+for i in sys.argv[1:]:
+ print os.path.realpath (i)
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Temporary script that helps convert translated documentation
+# sources for texi2html processing
+
+# USAGE: tely-gettext.py PYTHON-DIR LOCALEDIR LANG FILES
+
+print "tely-gettext.py"
+
+import sys
+import re
+import os
+import gettext
+
+if len (sys.argv) > 3:
+ buildscript_dir, localedir, lang = sys.argv[1:4]
+else:
+ print """USAGE: tely-gettext.py PYTHON-DIR LOCALEDIR LANG FILES
+ For example scripts/auxiliar/tely-gettext.py python/out Documentation/po/out-www de Documentation/de/user/*.tely"""
+ sys.exit (1)
+
+sys.path.append (buildscript_dir)
+import langdefs
+
+double_punct_char_separator = langdefs.LANGDICT[lang].double_punct_char_sep
+t = gettext.translation('lilypond-doc', localedir, [lang])
+_doc = t.gettext
+
+include_re = re.compile (r'@include (.*?)$', re.M)
+whitespaces = re.compile (r'\s+')
+ref_re = re.compile (r'(?ms)@(ruser|rprogram|ref|rlearning)\{(.*?)\}')
+node_section_re = re.compile (r'@node (.*?)\n@((?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) (.*?)\n')
+menu_entry_re = re.compile (r'\* (.*?)::')
+
+def ref_gettext (m):
+ r = whitespaces.sub (' ', m.group (2))
+ return '@' + m.group (1) + '{' + _doc (r) + '}'
+
+def node_gettext (m):
+ return '@node ' + _doc (m.group (1)) + '\n@' + \
+ m.group (2) + ' ' + _doc (m.group (3)) + \
+ '\n@translationof ' + m.group (1) + '\n'
+
+def menu_entry_gettext (m):
+ return '* ' + _doc (m.group (1)) + '::'
+
+def process_file (filename):
+ print "Processing %s" % filename
+ f = open (filename, 'r')
+ page = f.read ()
+ f.close()
+ page = node_section_re.sub (node_gettext, page)
+ page = ref_re.sub (ref_gettext, page)
+ page = menu_entry_re.sub (menu_entry_gettext, page)
+ page = page.replace ("""-- SKELETON FILE --
+When you actually translate this file, please remove these lines as
+well as all `UNTRANSLATED NODE: IGNORE ME' lines.""", """@c -- SKELETON FILE --""")
+ page = page.replace ('UNTRANSLATED NODE: IGNORE ME', "@c UNTRANSLATED NODE: IGNORE ME")
+ includes = [whitespaces.sub ('', f) for f in include_re.findall (page)]
+ f = open (filename, 'w')
+ f.write (page)
+ f.close ()
+ dir = os.path.dirname (filename)
+ for file in includes:
+ p = os.path.join (dir, file)
+ if os.path.exists (p):
+ process_file (p)
+
+for filename in sys.argv[4:]:
+ process_file (filename)
--- /dev/null
+#!/usr/bin/env python
+# texi-langutils.py
+
+# WARNING: this script can't find files included in a different directory
+
+import sys
+import re
+import getopt
+import os
+
+import langdefs
+
+def read_pipe (command):
+ print command
+ pipe = os.popen (command)
+ output = pipe.read ()
+ if pipe.close ():
+ print "pipe failed: %(command)s" % locals ()
+ return output
+
+
+optlist, texi_files = getopt.getopt(sys.argv[1:],'no:d:b:i:l:',['skeleton', 'gettext'])
+process_includes = ('-n', '') not in optlist # -n: don't process @include's in texinfo files
+
+make_gettext = ('--gettext', '') in optlist # --gettext generate a node list from a Texinfo source
+make_skeleton = ('--skeleton', '') in optlist # --skeleton extract the node tree from a Texinfo source
+
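+# Example (hypothetical invocation):
+#   texi-langutils.py --gettext -l fr -o doc.pot lilypond.tely
+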
+output_file = 'doc.pot'
+
+# @untranslated should be defined as a macro in Texinfo source
+node_blurb = '''@untranslated
+'''
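+# for instance (illustrative definition only):
+#   @macro untranslated
+#   UNTRANSLATED NODE: IGNORE ME
+#   @end macro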
+doclang = ''
+head_committish = read_pipe ('git-rev-parse HEAD')
+intro_blurb = '''@c -*- coding: utf-8; mode: texinfo%(doclang)s -*-
+@c This file is part of %(topfile)s
+@ignore
+ Translation of GIT committish: %(head_committish)s
+ When revising a translation, copy the HEAD committish of the
+ version that you are working on. See TRANSLATION for details.
+@end ignore
+'''
+
+end_blurb = """
+@c -- SKELETON FILE --
+"""
+
+for x in optlist:
+ if x[0] == '-o': # -o NAME set PO output file name to NAME
+ output_file = x[1]
+ elif x[0] == '-d': # -d DIR set working directory to DIR
+ os.chdir (x[1])
+ elif x[0] == '-b': # -b BLURB set blurb written at each node to BLURB
+ node_blurb = x[1]
+ elif x[0] == '-i': # -i BLURB set blurb written at beginning of each file to BLURB
+ intro_blurb = x[1]
+ elif x[0] == '-l': # -l ISOLANG set documentlanguage to ISOLANG
+ doclang = '; documentlanguage: ' + x[1]
+
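+# matches menu entries, @include, @menu/@end menu, @node, sectioning
+# commands and @rglos{...} references in a Texinfo source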
+texinfo_with_menus_re = re.compile (r"^(\*) +([^:\n]+)::.*?$|^@(include|menu|end menu|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.*?)$|@(rglos){(.+?)}", re.M)
+
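+# same as texinfo_with_menus_re, without menu entries and @menu blocks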
+texinfo_re = re.compile (r"^@(include|node|(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?|top|chapter|(?:sub){0,2}section|(?:major|chap|(?:sub){0,2})heading) *(.+?)$|@(rglos){(.+?)}", re.M)
+
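+# matches translatable material in ly code: variable definitions,
+# comments, and quoted context ids in \new or \context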
+ly_string_re = re.compile (r'^([a-zA-Z]+)[\t ]*=|%+[\t ]*(.*)$|\\(?:new|context)\s+(?:[a-zA-Z]*?(?:Staff(?:Group)?|Voice|FiguredBass|FretBoards|Names|Devnull))\s+=\s+"?([a-zA-Z]+)"?\s+')
+lsr_verbatim_ly_re = re.compile (r'% begin verbatim$')
+texinfo_verbatim_ly_re = re.compile (r'^@lilypond\[.*?verbatim')
+
+def process_texi (texifilename, i_blurb, n_blurb, write_skeleton, topfile, output_file=None, scan_ly=False):
+ try:
+ f = open (texifilename, 'r')
+ texifile = f.read ()
+ f.close ()
+ printedfilename = texifilename.replace ('../','')
+ includes = []
+
+ # process ly var names and comments
+ if output_file and (scan_ly or texifilename.endswith ('.ly')):
+ lines = texifile.splitlines ()
+ i = 0
+ in_verb_ly_block = False
+ if texifilename.endswith ('.ly'):
+ verbatim_ly_re = lsr_verbatim_ly_re
+ else:
+ verbatim_ly_re = texinfo_verbatim_ly_re
+ for i in range (len (lines)):
+ if verbatim_ly_re.search (lines[i]):
+ in_verb_ly_block = True
+ elif lines[i].startswith ('@end lilypond'):
+ in_verb_ly_block = False
+ elif in_verb_ly_block:
+ for (var, comment, context_id) in ly_string_re.findall (lines[i]):
+ if var:
+ output_file.write ('# ' + printedfilename + ':' + \
+ str (i + 1) + ' (variable)\n_(r"' + var + '")\n')
+ elif comment:
+ output_file.write ('# ' + printedfilename + ':' + \
+ str (i + 1) + ' (comment)\n_(r"' + \
+ comment.replace ('"', '\\"') + '")\n')
+ elif context_id:
+ output_file.write ('# ' + printedfilename + ':' + \
+ str (i + 1) + ' (context id)\n_(r"' + \
+ context_id + '")\n')
+
+ # process Texinfo node names and section titles
+ if write_skeleton:
+ g = open (os.path.basename (texifilename), 'w')
+            subst = globals ().copy ()
+            subst.update (locals ())
+ g.write (i_blurb % subst)
+            found = texinfo_with_menus_re.findall (texifile)
+            node_trigger = False
+            for item in found:
+ if item[0] == '*':
+ g.write ('* ' + item[1] + '::\n')
+ elif output_file and item[4] == 'rglos':
+ output_file.write ('_(r"' + item[5] + '") # @rglos in ' + printedfilename + '\n')
+ elif item[2] == 'menu':
+ g.write ('@menu\n')
+ elif item[2] == 'end menu':
+ g.write ('@end menu\n\n')
+ else:
+ g.write ('@' + item[2] + ' ' + item[3] + '\n')
+ if node_trigger:
+ g.write (n_blurb)
+ node_trigger = False
+ elif item[2] == 'include':
+ includes.append (item[3])
+ else:
+ if output_file:
+ output_file.write ('# @' + item[2] + ' in ' + \
+ printedfilename + '\n_(r"' + item[3].strip () + '")\n')
+ if item[2] == 'node':
+ node_trigger = True
+ g.write (end_blurb)
+ g.close ()
+
+ elif output_file:
+            found = texinfo_re.findall (texifile)
+            for item in found:
+ if item[0] == 'include':
+ includes.append(item[1])
+ elif item[2] == 'rglos':
+ output_file.write ('# @rglos in ' + printedfilename + '\n_(r"' + item[3] + '")\n')
+ else:
+ output_file.write ('# @' + item[0] + ' in ' + printedfilename + '\n_(r"' + item[1].strip () + '")\n')
+
+ if process_includes:
+ dir = os.path.dirname (texifilename)
+ for item in includes:
+ process_texi (os.path.join (dir, item.strip ()), i_blurb, n_blurb, write_skeleton, topfile, output_file, scan_ly)
+ except IOError, (errno, strerror):
+ sys.stderr.write ("I/O error(%s): %s: %s\n" % (errno, texifilename, strerror))
+
+
+if intro_blurb != '':
+ intro_blurb += '\n\n'
+if node_blurb != '':
+ node_blurb = '\n' + node_blurb + '\n\n'
+if make_gettext:
+ node_list_filename = 'node_list'
+ node_list = open (node_list_filename, 'w')
+ node_list.write ('# -*- coding: utf-8 -*-\n')
+ for texi_file in texi_files:
+        # Ugly: scan ly comments and variable names only in English docs
+ is_english_doc = 'Documentation/user' in texi_file
+ process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
+ os.path.basename (texi_file), node_list,
+ scan_ly=is_english_doc)
+ for word in ('Up:', 'Next:', 'Previous:', 'Appendix ', 'Footnotes', 'Table of Contents'):
+ node_list.write ('_(r"' + word + '")\n')
+ node_list.close ()
+ os.system ('xgettext -c -L Python --no-location -o ' + output_file + ' ' + node_list_filename)
+else:
+ for texi_file in texi_files:
+ process_texi (texi_file, intro_blurb, node_blurb, make_skeleton,
+ os.path.basename (texi_file))
--- /dev/null
+#!/usr/bin/env python
+# texi-skeleton-update.py
+
+import sys
+import glob
+import os
+import shutil
+
+sys.stderr.write ('texi-skeleton-update.py\n')
+
+orig_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[1] + '/*.ite??')])
+new_skeletons = set ([os.path.basename (f) for f in glob.glob (sys.argv[2] + '/*.ite??')])
+
+for f in new_skeletons:
+ if f in orig_skeletons:
+ g = open (os.path.join (sys.argv[1], f), 'r').read ()
+ if '-- SKELETON FILE --' in g:
+ sys.stderr.write ("Updating %s...\n" % f)
+ shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
+ elif f != 'fdl.itexi':
+ sys.stderr.write ("Copying new file %s...\n" % f)
+ shutil.copy (os.path.join (sys.argv[2], f), sys.argv[1])
+
+for f in orig_skeletons.difference (new_skeletons):
+ sys.stderr.write ("Warning: outdated skeleton file %s\n" % f)
--- /dev/null
+#!/usr/bin/env python
+
+"""
+USAGE: translations-status.py BUILDSCRIPT-DIR LOCALEDIR
+
+  This script must be run from Documentation/.
+
+  Reads the template file translations.template.html.in and, for each
+LANG in LANGUAGES, LANG/translations.template.html.in.
+  Writes translations.html.in and, for each LANG in LANGUAGES,
+translations.LANG.html.in.
+  Writes out/translations-status.txt.
+  Updates word counts in TRANSLATION.
+"""
+
+import sys
+import re
+import string
+import os
+
+import langdefs
+import buildlib
+
+def progress (s):
+    sys.stderr.write (s + '\n')
+
+progress ("translations-status.py")
+
+_doc = lambda s: s
+
+# load gettext messages catalogs
+translation = langdefs.translation
+
+
+language_re = re.compile (r'^@documentlanguage (.+)', re.M)
+comments_re = re.compile (r'^@ignore\n(.|\n)*?\n@end ignore$|@c .*?$', re.M)
+space_re = re.compile (r'\s+', re.M)
+lilypond_re = re.compile (r'@lilypond({.*?}|(.|\n)*?\n@end lilypond$)', re.M)
+node_re = re.compile ('^@node .*?$', re.M)
+title_re = re.compile ('^@(top|chapter|(?:sub){0,2}section|' + \
+'(?:unnumbered|appendix)(?:(?:sub){0,2}sec)?) (.*?)$', re.M)
+include_re = re.compile ('^@include (.*?)$', re.M)
+
+translators_re = re.compile (r'^@c\s+Translators\s*:\s*(.*?)$', re.M | re.I)
+checkers_re = re.compile (r'^@c\s+Translation\s*checkers\s*:\s*(.*?)$',
+ re.M | re.I)
+status_re = re.compile (r'^@c\s+Translation\s*status\s*:\s*(.*?)$', re.M | re.I)
+post_gdp_re = re.compile ('post.GDP', re.I)
+untranslated_node_str = '@untranslated'
+skeleton_str = '-- SKELETON FILE --'
+
+section_titles_string = _doc ('Section titles')
+last_updated_string = _doc (' <p><i>Last updated %s</i></p>\n')
+detailed_status_heads = [_doc ('Translators'), _doc ('Translation checkers'),
+ _doc ('Translated'), _doc ('Up to date'),
+ _doc ('Other info')]
+format_table = {
+ 'not translated': {'color':'d0f0f8', 'short':_doc ('no'), 'abbr':'NT',
+ 'long':_doc ('not translated')},
+ 'partially translated': {'color':'dfef77',
+ 'short':_doc ('partially (%(p)d %%)'),
+ 'abbr':'%(p)d%%',
+ 'long':_doc ('partially translated (%(p)d %%)')},
+ 'fully translated': {'color':'1fff1f', 'short':_doc ('yes'), 'abbr':'FT',
+ 'long': _doc ('translated')},
+ 'up to date': {'short':_doc ('yes'), 'long':_doc ('up to date'),
+ 'abbr':'100%%', 'vague':_doc ('up to date')},
+ 'outdated': {'short':_doc ('partially'), 'abbr':'%(p)d%%',
+ 'vague':_doc ('partially up to date')},
+ 'N/A': {'short':_doc ('N/A'), 'abbr':'N/A', 'color':'d587ff', 'vague':''},
+ 'pre-GDP':_doc ('pre-GDP'),
+ 'post-GDP':_doc ('post-GDP')
+}
+
+texi_level = {
+# (Unnumbered/Numbered/Lettered, level)
+ 'top': ('u', 0),
+ 'unnumbered': ('u', 1),
+ 'unnumberedsec': ('u', 2),
+ 'unnumberedsubsec': ('u', 3),
+ 'chapter': ('n', 1),
+ 'section': ('n', 2),
+ 'subsection': ('n', 3),
+ 'appendix': ('l', 1)
+}
+
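+# translation table that advances an appendix letter:
+# '@' -> 'A', 'A' -> 'B', ..., 'Y' -> 'Z'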
+appendix_number_trans = string.maketrans ('@ABCDEFGHIJKLMNOPQRSTUVWXY',
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
+
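+# Track hierarchical section numbering while walking a document tree;
+# increase () bumps the counter at the given sectioning level and
+# returns the formatted section number ('' for unnumbered sections).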
+class SectionNumber (object):
+ def __init__ (self):
+ self.__data = [[0,'u']]
+
+ def __increase_last_index (self):
+ type = self.__data[-1][1]
+ if type == 'l':
+ self.__data[-1][0] = \
+ self.__data[-1][0].translate (appendix_number_trans)
+ elif type == 'n':
+ self.__data[-1][0] += 1
+
+ def format (self):
+ if self.__data[-1][1] == 'u':
+ return ''
+ return '.'.join ([str (i[0]) for i in self.__data if i[1] != 'u']) + ' '
+
+ def increase (self, (type, level)):
+ if level == 0:
+ self.__data = [[0,'u']]
+ while level + 1 < len (self.__data):
+ del self.__data[-1]
+ if level + 1 > len (self.__data):
+ self.__data.append ([0, type])
+ if type == 'l':
+ self.__data[-1][0] = '@'
+ if type == self.__data[-1][1]:
+ self.__increase_last_index ()
+ else:
+ self.__data[-1] = ([0, type])
+ if type == 'l':
+ self.__data[-1][0] = 'A'
+ elif type == 'n':
+ self.__data[-1][0] = 1
+ return self.format ()
+
+
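+# Map a percentage to a six-hex-digit background color, interpolating
+# linearly from light red (0 %) through orange and yellow to green
+# (100 %).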
+def percentage_color (percent):
+ p = percent / 100.0
+ if p < 0.33:
+ c = [hex (int (3 * p * b + (1 - 3 * p) * a))[2:]
+ for (a, b) in [(0xff, 0xff), (0x5c, 0xa6), (0x5c, 0x4c)]]
+ elif p < 0.67:
+ c = [hex (int ((3 * p - 1) * b + (2 - 3 * p) * a))[2:]
+ for (a, b) in [(0xff, 0xff), (0xa6, 0xff), (0x4c, 0x3d)]]
+ else:
+ c = [hex (int ((3 * p - 2) * b + 3 * (1 - p) * a))[2:]
+ for (a, b) in [(0xff, 0x1f), (0xff, 0xff), (0x3d, 0x1f)]]
+ return ''.join (c)
+
+
+def update_word_count (text, filename, word_count):
+ return re.sub (r'(?m)^(\d+) *' + filename,
+ str (word_count).ljust (6) + filename,
+ text)
+
+po_msgid_re = re.compile (r'^msgid "(.*?)"(?:\n"(.*?)")*', re.M)
+
+def po_word_count (po_content):
+ s = ' '.join ([''.join (t) for t in po_msgid_re.findall (po_content)])
+ return len (space_re.split (s))
+
+sgml_tag_re = re.compile (r'<.*?>', re.S)
+
+def sgml_word_count (sgml_doc):
+ s = sgml_tag_re.sub ('', sgml_doc)
+ return len (space_re.split (s))
+
+def tely_word_count (tely_doc):
+ '''
+ Calculate word count of a Texinfo document node by node.
+
+ Take string tely_doc as an argument.
+ Return a list of integers.
+
+ Texinfo comments and @lilypond blocks are not included in word counts.
+ '''
+ tely_doc = comments_re.sub ('', tely_doc)
+ tely_doc = lilypond_re.sub ('', tely_doc)
+ nodes = node_re.split (tely_doc)
+ return [len (space_re.split (n)) for n in nodes]
+
+
+class TelyDocument (object):
+ def __init__ (self, filename):
+ self.filename = filename
+ self.contents = open (filename).read ()
+
+        ## record title and sectioning level of first Texinfo section
+ m = title_re.search (self.contents)
+ if m:
+ self.title = m.group (2)
+ self.level = texi_level [m.group (1)]
+ else:
+ self.title = 'Untitled'
+ self.level = ('u', 1)
+
+ m = language_re.search (self.contents)
+ if m:
+ self.language = m.group (1)
+
+ included_files = [os.path.join (os.path.dirname (filename), t)
+ for t in include_re.findall (self.contents)]
+ self.included_files = [p for p in included_files if os.path.exists (p)]
+
+ def print_title (self, section_number):
+ return section_number.increase (self.level) + self.title
+
+
+class TranslatedTelyDocument (TelyDocument):
+ def __init__ (self, filename, masterdocument, parent_translation=None):
+ TelyDocument.__init__ (self, filename)
+
+ self.masterdocument = masterdocument
+ if not hasattr (self, 'language') \
+ and hasattr (parent_translation, 'language'):
+ self.language = parent_translation.language
+ if hasattr (self, 'language'):
+ self.translation = translation[self.language]
+ else:
+ self.translation = lambda x: x
+ self.title = self.translation (self.title)
+
+ ## record authoring information
+ m = translators_re.search (self.contents)
+ if m:
+ self.translators = [n.strip () for n in m.group (1).split (',')]
+ else:
+ self.translators = parent_translation.translators
+ m = checkers_re.search (self.contents)
+ if m:
+ self.checkers = [n.strip () for n in m.group (1).split (',')]
+ elif isinstance (parent_translation, TranslatedTelyDocument):
+ self.checkers = parent_translation.checkers
+ else:
+ self.checkers = []
+
+ ## check whether translation is pre- or post-GDP
+ m = status_re.search (self.contents)
+ if m:
+ self.post_gdp = bool (post_gdp_re.search (m.group (1)))
+ else:
+ self.post_gdp = False
+
+ ## record which parts (nodes) of the file are actually translated
+        self.partially_translated = skeleton_str not in self.contents
+        nodes = node_re.split (self.contents)
+        self.translated_nodes = [untranslated_node_str not in n for n in nodes]
+
+ ## calculate translation percentage
+ master_total_word_count = sum (masterdocument.word_count)
+ translation_word_count = \
+ sum ([masterdocument.word_count[k] * self.translated_nodes[k]
+ for k in range (min (len (masterdocument.word_count),
+ len (self.translated_nodes)))])
+ self.translation_percentage = \
+ 100 * translation_word_count / master_total_word_count
+
+ ## calculate how much the file is outdated
+ (diff_string, error) = \
+ buildlib.check_translated_doc (masterdocument.filename, self.filename, self.contents)
+ if error:
+ sys.stderr.write ('warning: %s: %s' % (self.filename, error))
+ self.uptodate_percentage = None
+ else:
+ diff = diff_string.splitlines ()
+ insertions = sum ([len (l) - 1 for l in diff
+ if l.startswith ('+')
+ and not l.startswith ('+++')])
+ deletions = sum ([len (l) - 1 for l in diff
+ if l.startswith ('-')
+ and not l.startswith ('---')])
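+            # rough outdatedness estimate: characters on changed diff
+            # lines, relative to the size of the master document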
+ outdateness_percentage = 50.0 * (deletions + insertions) / \
+ (masterdocument.size + 0.5 * (deletions - insertions))
+ self.uptodate_percentage = 100 - int (outdateness_percentage)
+ if self.uptodate_percentage > 100:
+ alternative = 50
+ progress ("%s: strange uptodateness percentage %d %%, \
+setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
+ self.uptodate_percentage = alternative
+ elif self.uptodate_percentage < 1:
+ alternative = 1
+ progress ("%s: strange uptodateness percentage %d %%, \
+setting to %d %%" % (self.filename, self.uptodate_percentage, alternative))
+ self.uptodate_percentage = alternative
+
+ def completeness (self, formats=['long'], translated=False):
+ if translated:
+ translation = self.translation
+ else:
+ translation = lambda x: x
+
+ if isinstance (formats, str):
+ formats = [formats]
+ p = self.translation_percentage
+ if p == 0:
+ status = 'not translated'
+ elif p == 100:
+ status = 'fully translated'
+ else:
+ status = 'partially translated'
+ return dict ([(f, translation (format_table[status][f]) % locals())
+ for f in formats])
+
+ def uptodateness (self, formats=['long'], translated=False):
+ if translated:
+ translation = self.translation
+ else:
+ translation = lambda x: x
+
+ if isinstance (formats, str):
+ formats = [formats]
+ p = self.uptodate_percentage
+        if p is None:
+ status = 'N/A'
+ elif p == 100:
+ status = 'up to date'
+ else:
+ status = 'outdated'
+ l = {}
+ for f in formats:
+            if f == 'color' and p is not None:
+ l['color'] = percentage_color (p)
+ else:
+ l[f] = translation (format_table[status][f]) % locals ()
+ return l
+
+ def gdp_status (self):
+ if self.post_gdp:
+ return self.translation (format_table['post-GDP'])
+ else:
+ return self.translation (format_table['pre-GDP'])
+
+ def short_html_status (self):
+ s = ' <td>'
+ if self.partially_translated:
+ s += '<br>\n '.join (self.translators) + '<br>\n'
+ if self.checkers:
+ s += ' <small>' + \
+ '<br>\n '.join (self.checkers) + '</small><br>\n'
+
+ c = self.completeness (['color', 'long'])
+ s += ' <span style="background-color: #%(color)s">\
+%(long)s</span><br>\n' % c
+
+ if self.partially_translated:
+ u = self.uptodateness (['vague', 'color'])
+ s += ' <span style="background-color: #%(color)s">\
+%(vague)s</span><br>\n' % u
+
+ s += ' </td>\n'
+ return s
+
+ def text_status (self):
+ s = self.completeness ('abbr')['abbr'] + ' '
+
+ if self.partially_translated:
+ s += self.uptodateness ('abbr')['abbr'] + ' '
+ return s
+
+ def html_status (self, numbering=SectionNumber ()):
+ if self.title == 'Untitled':
+ return ''
+
+ if self.level[1] == 0: # if self is a master document
+ s = '''<table align="center" border="2">
+ <tr align="center">
+ <th>%s</th>''' % self.print_title (numbering)
+ s += ''.join ([' <th>%s</th>\n' % self.translation (h)
+ for h in detailed_status_heads])
+ s += ' </tr>\n'
+ s += ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
+ % (self.translation (section_titles_string),
+ sum (self.masterdocument.word_count))
+
+ else:
+ s = ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
+ % (self.print_title (numbering),
+ sum (self.masterdocument.word_count))
+
+ if self.partially_translated:
+ s += ' <td>' + '<br>\n '.join (self.translators) + '</td>\n'
+ s += ' <td>' + '<br>\n '.join (self.checkers) + '</td>\n'
+ else:
+ s += ' <td></td>\n' * 2
+
+ c = self.completeness (['color', 'short'], translated=True)
+ s += ' <td><span style="background-color: #%(color)s">\
+%(short)s</span></td>\n' % {'color': c['color'],
+ 'short': c['short']}
+
+ if self.partially_translated:
+ u = self.uptodateness (['short', 'color'], translated=True)
+ s += ' <td><span style="background-color: #%(color)s">\
+%(short)s</span></td>\n' % {'color': u['color'],
+ 'short': u['short']}
+ else:
+ s += ' <td></td>\n'
+
+ s += ' <td>' + self.gdp_status () + '</td>\n </tr>\n'
+ s += ''.join ([i.translations[self.language].html_status (numbering)
+ for i in self.masterdocument.includes
+ if self.language in i.translations])
+
+ if self.level[1] == 0: # if self is a master document
+ s += '</table>\n<p></p>\n'
+ return s
+
+class MasterTelyDocument (TelyDocument):
+ def __init__ (self,
+ filename,
+ parent_translations=dict ([(lang, None)
+ for lang in langdefs.LANGDICT])):
+ TelyDocument.__init__ (self, filename)
+ self.size = len (self.contents)
+ self.word_count = tely_word_count (self.contents)
+ translations = dict ([(lang, os.path.join (lang, filename))
+ for lang in langdefs.LANGDICT])
+ self.translations = \
+ dict ([(lang,
+ TranslatedTelyDocument (translations[lang],
+ self, parent_translations.get (lang)))
+ for lang in langdefs.LANGDICT
+ if os.path.exists (translations[lang])])
+ if self.translations:
+ self.includes = [MasterTelyDocument (f, self.translations)
+ for f in self.included_files]
+ else:
+ self.includes = []
+
+ def update_word_counts (self, s):
+ s = update_word_count (s, self.filename, sum (self.word_count))
+ for i in self.includes:
+ s = i.update_word_counts (s)
+ return s
+
+ def html_status (self, numbering=SectionNumber ()):
+ if self.title == 'Untitled' or not self.translations:
+ return ''
+ if self.level[1] == 0: # if self is a master document
+ s = '''<table align="center" border="2">
+ <tr align="center">
+ <th>%s</th>''' % self.print_title (numbering)
+ s += ''.join ([' <th>%s</th>\n' % l for l in self.translations])
+ s += ' </tr>\n'
+ s += ' <tr align="left">\n <td>Section titles<br>(%d)</td>\n' \
+ % sum (self.word_count)
+
+ else: # if self is an included file
+ s = ' <tr align="left">\n <td>%s<br>(%d)</td>\n' \
+ % (self.print_title (numbering), sum (self.word_count))
+
+ s += ''.join ([t.short_html_status ()
+ for t in self.translations.values ()])
+ s += ' </tr>\n'
+ s += ''.join ([i.html_status (numbering) for i in self.includes])
+
+ if self.level[1] == 0: # if self is a master document
+ s += '</table>\n<p></p>\n'
+ return s
+
+ def text_status (self, numbering=SectionNumber (), colspec=[48,12]):
+ if self.title == 'Untitled' or not self.translations:
+ return ''
+
+ s = ''
+ if self.level[1] == 0: # if self is a master document
+ s += (self.print_title (numbering) + ' ').ljust (colspec[0])
+            s += ''.join ([l.ljust (colspec[1])
+                           for l in self.translations])
+ s += '\n'
+ s += ('Section titles (%d)' % \
+ sum (self.word_count)).ljust (colspec[0])
+
+ else:
+ s = '%s (%d) ' \
+ % (self.print_title (numbering), sum (self.word_count))
+ s = s.ljust (colspec[0])
+
+ s += ''.join ([t.text_status ().ljust(colspec[1])
+ for t in self.translations.values ()])
+ s += '\n\n'
+ s += ''.join ([i.text_status (numbering) for i in self.includes])
+
+ if self.level[1] == 0:
+ s += '\n'
+ return s
+
+
+update_category_word_counts_re = re.compile (r'(?ms)^-(\d+)-(.*?\n)\d+ *total')
+
+counts_re = re.compile (r'(?m)^(\d+) ')
+
+def update_category_word_counts_sub (m):
+ return '-' + m.group (1) + '-' + m.group (2) + \
+ str (sum ([int (c)
+ for c in counts_re.findall (m.group (2))])).ljust (6) + \
+ 'total'
+
+
+progress ("Reading documents...")
+
+tely_files = \
+ buildlib.read_pipe ("find -maxdepth 2 -name '*.tely'")[0].splitlines ()
+tely_files.sort ()
+master_docs = [MasterTelyDocument (os.path.normpath (filename))
+ for filename in tely_files]
+master_docs = [doc for doc in master_docs if doc.translations]
+
+main_status_page = open ('translations.template.html.in').read ()
+
+enabled_languages = [l for l in langdefs.LANGDICT
+ if langdefs.LANGDICT[l].enabled
+ and l != 'en']
+lang_status_pages = \
+    dict ([(l, open (os.path.join (l, 'translations.template.html.in')).read ())
+ for l in enabled_languages])
+
+progress ("Generating status pages...")
+
+date_time = buildlib.read_pipe ('LANG= date -u')[0]
+
+main_status_html = last_updated_string % date_time
+main_status_html += '\n'.join ([doc.html_status () for doc in master_docs])
+
+html_re = re.compile ('<html>', re.I)
+end_body_re = re.compile ('</body>', re.I)
+
+html_header = '''<html>
+<!-- This page is automatically generated by translations-status.py from
+translations.template.html.in; DO NOT EDIT !-->'''
+
+main_status_page = html_re.sub (html_header, main_status_page)
+
+main_status_page = end_body_re.sub (main_status_html + '\n</body>',
+ main_status_page)
+
+open ('translations.html.in', 'w').write (main_status_page)
+
+for l in enabled_languages:
+ date_time = buildlib.read_pipe ('LANG=%s date -u' % l)[0]
+ lang_status_pages[l] = translation[l] (last_updated_string) % date_time + lang_status_pages[l]
+ lang_status_page = html_re.sub (html_header, lang_status_pages[l])
+ html_status = '\n'.join ([doc.translations[l].html_status ()
+ for doc in master_docs
+ if l in doc.translations])
+ lang_status_page = end_body_re.sub (html_status + '\n</body>',
+ lang_status_page)
+ open (os.path.join (l, 'translations.html.in'), 'w').write (lang_status_page)
+
+main_status_txt = '''Documentation translations status
+Generated %s
+NT = not translated
+FT = fully translated
+
+''' % date_time
+
+main_status_txt += '\n'.join ([doc.text_status () for doc in master_docs])
+
+status_txt_file = 'out/translations-status.txt'
+progress ("Writing %s..." % status_txt_file)
+open (status_txt_file, 'w').write (main_status_txt)
+
+translation_instructions_file = 'TRANSLATION'
+progress ("Updating %s..." % translation_instructions_file)
+translation_instructions = open (translation_instructions_file).read ()
+
+for doc in master_docs:
+ translation_instructions = doc.update_word_counts (translation_instructions)
+
+for html_file in re.findall (r'(?m)^\d+ *(\S+?\.html\S*?)(?: |$)',
+ translation_instructions):
+ word_count = sgml_word_count (open (html_file).read ())
+ translation_instructions = update_word_count (translation_instructions,
+ html_file,
+ word_count)
+
+for po_file in re.findall (r'(?m)^\d+ *(\S+?\.po\S*?)(?: |$)',
+ translation_instructions):
+ word_count = po_word_count (open (po_file).read ())
+ translation_instructions = update_word_count (translation_instructions,
+ po_file,
+ word_count)
+
+translation_instructions = \
+ update_category_word_counts_re.sub (update_category_word_counts_sub,
+ translation_instructions)
+
+open (translation_instructions_file, 'w').write (translation_instructions)
--- /dev/null
+#!/usr/bin/env python
+# update-snippets.py
+
+# USAGE: update-snippets.py REFERENCE-DIR TARGET-DIR FILES
+#
+# update ly snippets in TARGET-DIR/FILES with snippets from REFERENCE-DIR/FILES
+#
+# More precisely, each existing FILE in TARGET-DIR is matched to the FILE in
+# REFERENCE-DIR (if the latter does not exist, a warning is given).
+#
+# Shell wildcard expansion is performed on FILES.
+# This script currently supports Texinfo format.
+# Ly snippets preceded with a line containing '@c KEEP LY' in TARGET-DIR/FILES
+# will not be updated.
+# An error occurs if REFERENCE-DIR/FILE and TARGET-DIR/FILE do not have the
+# same snippets count.
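+#
+# Example (hypothetical invocation):
+#   update-snippets.py Documentation/user Documentation/fr/user '*.itely'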
+
+import sys
+import os
+import glob
+import re
+
+print "update-snippets.py"
+
+comment_re = re.compile (r'(?<!@)(@c(?:omment)? .*?\n|^@ignore\n.*?\n@end ignore\n)', re.M | re.S)
+snippet_re = re.compile (r'^(@lilypond(?:file)?(?:\[.*?\])?\s*\{.+?\}|@lilypond(?:\[.*?\])?(?:.|\n)+?@end lilypond)', re.M)
+
+
+def snippet_split (l):
+ r = []
+ for s in [s for s in l if s]:
+ if s.startswith ('@c ') or s.startswith ('@ignore\n') or s.startswith ('@comment '):
+ r.append(s)
+ else:
+ r += [t for t in snippet_re.split (s) if t]
+ return r
+
+def count_snippet (l):
+ k = 0
+ for s in l:
+ if s.startswith ('@lilypond'):
+ k += 1
+ return k
+
+def find_next_snippet (l, k):
+ while not l[k].startswith ('@lilypond'):
+ k += 1
+ return k
+
+exit_code = 0
+
+def update_exit_code (code):
+ global exit_code
+ exit_code = max (code, exit_code)
+
+ref_dir, target_dir = sys.argv [1:3]
+file_patterns = sys.argv[3:]
+
+total_snippet_count = 0
+changed_snippets_count = 0
+
+for pattern in file_patterns:
+ files = glob.glob (os.path.join (target_dir, pattern))
+ for file in files:
+ ref_file = os.path.join (ref_dir, os.path.basename (file))
+ if not os.path.isfile (ref_file):
+ sys.stderr.write ("Warning: %s: no such file.\nReference file for %s not found.\n" % (ref_file, file))
+ continue
+ f = open (file, 'r')
+ target_source = comment_re.split (f.read ())
+ f.close ()
+        if [s for s in target_source if '-- SKELETON FILE --' in s]:
+ sys.stderr.write ("Skipping skeleton file %s\n" % file)
+ continue
+ g = open (ref_file, 'r')
+        ref_source = comment_re.split (g.read ())
+        g.close ()
+ target_source = snippet_split (target_source)
+ ref_source = snippet_split (ref_source)
+ if '' in target_source or '' in ref_source:
+            raise Exception ("unexpected empty string after snippet split")
+ snippet_count = count_snippet (target_source)
+ if not snippet_count == count_snippet (ref_source):
+ update_exit_code (1)
+ sys.stderr.write ("Error: %s and %s have different snippet counts.\n\
+Update translation by at least adding a @lilypond block where necessary, then rerun this script.\n" % (ref_file, file))
+ continue
+ total_snippet_count += snippet_count
+ c = 0
+ k = -1
+ for j in range (len (target_source)):
+ if target_source[j].startswith ('@lilypond'):
+ k = find_next_snippet (ref_source, k+1)
+ if j > 0 and (not target_source[j-1].startswith ('@c KEEP LY')) and target_source[j] != ref_source[k]:
+ target_source[j] = ref_source[k]
+ c += 1
+ changed_snippets_count += 1
+ f = open (file, 'w')
+        f.write (''.join (target_source))
+        f.close ()
+ sys.stderr.write ('%s: %d/%d snippets updated\n' % (file, c, snippet_count))
+
+sys.stderr.write ('\nTotal: %d snippets, %d updated snippets.\n' % (total_snippet_count, changed_snippets_count))
+sys.exit (exit_code)
# $(outdir)/$(INFO_IMAGES_DIR)/*.png symlinks are only needed to view
# out-www/*.info with Emacs -- HTML docs no longer need these
# symlinks, see replace_symlinks_urls in
-# python/aux/postprocess_html.py.
+# python/auxiliar/postprocess_html.py.
# make dereferences symlinks, and $(INFO_IMAGES_DIR) is a symlink
# to $(outdir), so we can't use directly $(INFO_IMAGES_DIR) as a