install-WWW:
-$(INSTALL) -m 755 -d $(DESTDIR)$(webdir)
- cp -a $(outdir)/web-root/ $(DESTDIR)$(webdir)/
-
+ rsync -rl $(outdir)/offline-root/ $(DESTDIR)$(webdir)
$(MAKE) -C Documentation/user local-install-WWW
$(MAKE) -C Documentation/user install-info
final-install:
@true
-web-ext = html midi pdf png txt ly signature
-
-# For docball, issue `make web CONTENT_NEGOTIATION='
-CONTENT_NEGOTIATION = --content-negotiation
-footify = $(PYTHON) $(step-bindir)/add-html-footer.py --name $(PACKAGE_NAME) --version $(TOPLEVEL_VERSION) $(CONTENT_NEGOTIATION)
-footifymail = MAILADDRESS='http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs'
-
+# For online docs with content negotiation, issue `make web WEB_TARGETS=online'
+# For both online and offline docs, issue `make web WEB_TARGETS="offline online"'
+WEB_TARGETS = offline
local-WWW-post:
# need UTF8 setting in case this is hosted on a website.
echo -e 'AddDefaultCharset utf-8\nAddCharset utf-8 .html\nAddCharset utf-8 .en\nAddCharset utf-8 .nl\nAddCharset utf-8 .txt\n' > $(top-build-dir)/.htaccess
$(PYTHON) $(buildscript-dir)/mutopia-index.py -o $(outdir)/examples.html input/
- echo '<META HTTP-EQUIV="refresh" content="0;URL=Documentation/index.html">' > $(outdir)/index.html
- echo '<html><body>Redirecting to the documentation index...</body></html>' >> $(outdir)/index.html
-
- cd $(top-build-dir) && $(FIND) . -name '*.html' -print | $(footifymail) xargs $(footify)
-
- cd $(top-build-dir) && find Documentation input \
- $(web-ext:%=-path '*/out-www/*.%' -or) -type l \
- | grep -v 'lily-[0-9a-f]*.*pdf' \
- | grep -v '/fr/' \
- > $(outdir)/weblist
- ls $(outdir)/*.html >> $(outdir)/weblist
-
-## urg: this is too hairy, should write a python script to do this.
-
-## rewrite file names so we lose out-www
- rm -rf $(outdir)/web-root/
- mkdir $(outdir)/web-root/
-## urg slow.
- cat $(outdir)/weblist | (cd $(top-build-dir); tar -cf- -T- ) | \
- tar -C $(outdir)/web-root/ -xf -
- for dir in $(outdir)/web-root/ ; do \
- cd $$dir && \
- for a in `find . -name out-www`; do \
- rsync -a --link-dest $$a/ $$a/ $$a/.. ; \
- rm -rf $$a ; \
- done \
- done
+ rm -rf $(outdir)/online-root
+ rm -rf $(outdir)/offline-root
+ $(PYTHON) $(buildscript-dir)/www_post.py $(PACKAGE_NAME) $(TOPLEVEL_VERSION) $(buildscript-dir) $(outdir) "$(WEB_TARGETS)"
tree-prefix = $(outdir)
tree-bin = $(tree-prefix)/bin
--- /dev/null
+#!@PYTHON@
+
+"""
+Add headers, footers and a language-selection menu to HTML pages.
+"""
+import re
+import os
+import time
+
+import langdefs
+
+default_header = r"""
+"""
+
+default_footer = r'''
+<div style="background-color: #e8ffe8; padding: 2; border: #c0ffc0 1px solid;">
+<p>
+<font size="-1">
+This page is for %(package_name)s-%(package_version)s (%(branch_str)s). <br>
+</font>
+<address><font size="-1">
+Report errors to <a href="%(mail_address_url)s">%(mail_address)s</a>.</font></address>
+</p>
+</div>
+'''
+
+header_tag = '<!-- header_tag -->'
+footer_tag = '<!-- footer_tag -->'
+
+def _ (s):
+ return s
+
+language_available = _ ("Other languages: %s.") % "%(language_menu)s"
+browser_language = _ ("Using <A HREF='%s'>automatic language selection</A>.") \
+ % "/web/about/browser-language"
+
+LANGUAGES_TEMPLATE = '''\
+<P>
+ %(language_available)s
+ <BR>
+ %(browser_language)s
+</P>
+''' % vars ()
+
+
+html_re = re.compile ('(.*?)(?:[.]([^/.]*))?[.]html$')
+
+def build_pages_dict (filelist):
+    """Build a dictionary of the available translations of each page"""
+ pages_dict = {}
+ for f in filelist:
+ m = html_re.match (f)
+ if m:
+            g = m.groups()
+            if len (g) <= 1 or g[1] is None:
+                e = ''
+            else:
+                e = g[1]
+            if g[0] not in pages_dict:
+                pages_dict[g[0]] = [e]
+            else:
+                pages_dict[g[0]].append (e)
+ return pages_dict
+
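+# For example (a sketch with hypothetical file names), the list
+# ['foo/index.html', 'foo/index.fr.html', 'foo/bar.html'] yields
+#     {'foo/index': ['', 'fr'], 'foo/bar': ['']}
+# i.e. each page name maps to the language extensions it exists in,
+# with '' denoting the English page.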
+
+def do_file (prefix, lang_ext, target, header, footer, pages_dict, out_root, name_filter,
+ package_name, package_version, branch_str, mail_address_url, mail_address):
+ file_name = langdefs.lang_file_name (prefix, lang_ext, '.html')
+ in_f = open (file_name)
+ s = in_f.read()
+ in_f.close()
+
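+    # escape literal '%' signs: the assembled page is passed through
+    # '%' substitution below (page_flavors[k] % vars ())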
+ s = re.sub ('%', '%%', s)
+
+ ### add header
+ if re.search (header_tag, s) == None:
+ body = '<BODY BGCOLOR=WHITE TEXT=BLACK>'
+ s = re.sub ('(?i)<body>', body, s)
+ if re.search ('(?i)<BODY', s):
+ s = re.sub ('(?i)<body[^>]*>', body + header, s, 1)
+ elif re.search ('(?i)<html', s):
+ s = re.sub ('(?i)<html>', '<HTML>' + header, s, 1)
+ else:
+ s = header + s
+
+ s = header_tag + '\n' + s
+
+ if re.search ('(?i)<!DOCTYPE', s) == None:
+ doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
+ s = doctype + s
+
+    # remove info's annoying indication of referencing an external document
+ s = re.sub (' \((lilypond|lilypond-internals|music-glossary)\)</a>', '</a>', s)
+
+ # urg
+ # maybe find first node?
+ fallback_web_title = '-- --'
+ m = re.match ('.*?<title>(.*?)</title>', s, re.DOTALL)
+ if m:
+ fallback_web_title = m.group (1)
+ s = re.sub ('@WEB-TITLE@', fallback_web_title, s)
+
+ ### add footer
+ page_flavors = {}
+ if re.search (footer_tag, s) == None:
+ if re.search ('(?i)</body', s):
+ s = re.sub ('(?i)</body>', footer_tag + footer + '\n' + '</BODY>', s, 1)
+ elif re.search ('(?i)</html', s):
+ s = re.sub ('(?i)</html>', footer_tag + footer + '\n' + '</HTML>', s, 1)
+ else:
+ s += footer_tag + footer + '\n'
+
+ # Find available translations of this page.
+ available = []
+ missing = []
+ for l in langdefs.LANGUAGES:
+ e = l.webext
+ if lang_ext != e:
+ if e in pages_dict[prefix]:
+ available.append (l)
+            elif lang_ext == '' and l.enabled: # write English versions of missing translated pages
+ missing.append (e)
+
+ if target == 'online':
+ # Strip .html, .png suffix for auto language selection (content
+ # negotiation). The menu must keep the full extension, so do
+ # this before adding the menu.
+        page_flavors[file_name] = re.sub (
+            '''(href|src)=[\'"]([^/][.]*[^.:\'"]*)([.]html|[.]png)(#[^"\']*|)[\'"]''',
+            '\\1="\\2\\4"', s)
+    elif target == 'offline':
+        if lang_ext == '':
+            page_flavors[file_name] = s
+            for e in missing:
+                page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = re.sub (
+                    '''href=[\'"]([^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]''',
+                    'href="\\1.' + e + '\\2\\3"', s)
+        else:
+            page_flavors[file_name] = re.sub (
+                '''href=[\'"]([^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]''',
+                'href="\\1.' + lang_ext + '\\2\\3"', s)
+
+ # Add menu after stripping: must not have autoselection for language menu.
+ language_menu = ''
+ for lang in available:
+ lang_file = lang.file_name (os.path.basename (prefix), '.html')
+ if language_menu != '':
+ language_menu += ', '
+ language_menu += '<a href="%s">%s</a>' % (lang_file, lang.name)
+
+ languages = ''
+ if language_menu:
+ languages = LANGUAGES_TEMPLATE % vars ()
+
+ # Put language menu before '</body>' and '</html>' tags
+ for k in page_flavors.keys():
+ if re.search ('(?i)</body', page_flavors[k]):
+ page_flavors[k] = re.sub ('(?i)</body>', languages + '</BODY>', page_flavors[k], 1)
+ elif re.search ('(?i)</html', page_flavors[k]):
+ page_flavors[k] = re.sub ('(?i)</html>', languages + '</HTML>', page_flavors[k], 1)
+ else:
+ page_flavors[k] += languages
+ else:
+ for e in [l.webext for l in langdefs.LANGUAGES]:
+ if not e in pages_dict[prefix]:
+ page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = s
+
+ for k in page_flavors.keys():
+ page_flavors[k] = page_flavors[k] % vars ()
+
+ out_f = open (os.path.join (out_root, name_filter (k)), 'w')
+ out_f.write (page_flavors[k])
+ out_f.close()
+
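+# A sketch of the rewriting above, with hypothetical link targets:
+#
+#   target 'online':   '<a href="foo.html#top">'  ->  '<a href="foo#top">'
+#                      '<img src="bar.png">'      ->  '<img src="bar">'
+#   target 'offline' (French flavor):
+#                      '<a href="foo.html#top">'  ->  '<a href="foo.fr.html#top">'
+#
+# Online pages let the server's content negotiation choose among the
+# foo.LANG.html flavors; offline pages link explicitly within one
+# language.  Absolute paths ('/...') and external URLs (containing ':')
+# are left untouched by these regexps.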
+
+def add_html_footer (package_name = '',
+ package_version = '',
+ header = default_header,
+ footer = default_footer,
+ target = 'offline',
+ mail_address = '(address unknown)',
+ pages_dict = {},
+ out_root = '',
+ name_filter = lambda s: s):
+ """Add header, footer to a number of HTML files
+
+ Arguments:
+ package_name=NAME set package_name to NAME
+ package_version=VERSION set package version to VERSION
+ header=TEXT use TEXT as header
+ footer=TEXT use TEXT as footer
+    target=offline|online    set page processing depending on the target
+          offline is for reading HTML pages locally
+          online is for hosting the HTML pages on a website with content
+            negotiation
+    mail_address             set \"Report errors to\" link
+    pages_dict               a dictionary returned by build_pages_dict()
+    out_root                 a path prefix where to write HTML pages
+    name_filter              an HTML file name filter
+ """
+ localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
+
+ if re.search ("http://", mail_address):
+ mail_address_url = mail_address
+ else:
+ mail_address_url= 'mailto:' + mail_address
+
+ versiontup = package_version.split ('.')
+ branch_str = 'stable-branch'
+    if int (versiontup[1]) % 2:
+ branch_str = 'development-branch'
+
+ for page, ext_list in pages_dict.items ():
+ for e in ext_list:
+ do_file (page, e, target, header, footer, pages_dict, out_root, name_filter,
+ package_name, package_version, branch_str, mail_address_url, mail_address)
+ # if the page is translated, a .en.html symlink is necessary for content negotiation
+ if target == 'online' and ext_list != ['']:
+ os.symlink (os.path.basename (page) + '.html', os.path.join (out_root, name_filter (page + '.en.html')))
--- /dev/null
+#!@PYTHON@
+
+"""
+Documentation i18n module
+"""
+
+def lang_file_name (p, langext, ext):
+ if langext != '':
+ return p + '.' + langext + ext
+ return p + ext
+
+class LanguageDef:
+ def __init__ (self, code, name, webext=None, double_punct_char_sep=''):
+ self.code = code
+ self.name = name
+ self.enabled = True
+ if webext == None:
+ self.webext = self.code
+ else:
+ self.webext = webext
+ self.double_punct_char_sep = double_punct_char_sep
+
+ def file_name (self, prefix, ext):
+ return lang_file_name (prefix, self.webext, ext)
+
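+# For example, with the definitions below: site.file_name ('index', '.html')
+# returns 'index.html' (webext is ''), while fr.file_name ('index', '.html')
+# returns 'index.fr.html'.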
+
+# All language information needed for documentation i18n is defined
+# here. For each 'Documentation/ab' directory containing docs
+# translated into language 'ab', there should be an entry in LANGUAGES.
+
+site = LanguageDef ('en', 'English', webext='')
+fr = LanguageDef ('fr', 'French', double_punct_char_sep=' ')
+#nl = LanguageDef ('nl', 'Nederlands')
+
+# Outdated or broken translations may be disabled
+# (please run 'make web-clean' before doing that):
+#fr.enabled = False
+
+LANGUAGES = (site, fr)
+
+if __name__ == '__main__':
+ print ' '.join ([l.code for l in LANGUAGES if l.enabled and l.code != 'en'])
+else:
+ import gettext
+ LANGDICT = {}
+ for l in LANGUAGES:
+ LANGDICT[l.code] = l
--- /dev/null
+#!@PYTHON@
+
+import re
+import os
+
+def new_link_path (link, dir, r):
+ l = link.split ('/')
+ d = dir.split ('/')
+ i = 0
+ while i < len(d) and i < len(l) and l[i] == '..':
+ if r.match (d[i]):
+ del l[i]
+ else:
+ i += 1
+ return '/'.join ([x for x in l if not r.match (x)])
+
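+# A sketch, assuming r matches the directory names being stripped
+# (e.g. 'out-www'): for a symlink in Documentation/user/out-www pointing
+# to ../../../input/test/out-www/foo.ly,
+#     new_link_path ('../../../input/test/out-www/foo.ly',
+#                    'Documentation/user/out-www', r)
+# returns '../../input/test/foo.ly', which is correct in the mirrored
+# tree where both out-www directories have been written to their parents.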
+def hardlink_tree (input_roots = [],
+ process_dirs = '.*',
+ strip_dir_names = '',
+ exclude_dirs = '',
+ process_files = '.*',
+ find_files = '',
+ exclude_files = '',
+ target_pattern = '',
+ targets = ['.']):
+ """Mirror trees for different targets by hardlinking files.
+
+ Arguments:
+     input_roots=DIRLIST use DIRLIST as the list of input tree roots
+     process_dirs=PATTERN only process files in directories named PATTERN
+     strip_dir_names=PATTERN strip directory names matching PATTERN
+       (write their content to the parent directory)
+     exclude_dirs=PATTERN don't recurse into directories named PATTERN
+     process_files=PATTERN only hardlink files named PATTERN
+     find_files=PATTERN find files named PATTERN; the list of files found is returned
+     exclude_files=PATTERN exclude files named PATTERN
+     target_pattern=STRING use STRING as target root directory name pattern
+     targets=DIRLIST mkdir each directory in DIRLIST and mirror the tree into each
+ """
+ process_files_re = re.compile (process_files)
+ find_files_re = re.compile (find_files)
+ exclude_dirs_re = re.compile (exclude_dirs)
+ exclude_files_re = re.compile (exclude_files)
+ process_dirs_re = re.compile (process_dirs)
+ strip_dir_names_re = re.compile (strip_dir_names)
+ do_strip_dir_names_re = re.compile ('/(?:' + strip_dir_names + ')')
+
+ found_files = []
+
+    if '%s' not in target_pattern:
+ target_pattern += '%s'
+ target_dirs = [target_pattern % s for s in targets]
+
+ map (os.mkdir, target_dirs)
+
+ for d in input_roots:
+ for in_dir, dirs, files in os.walk(d):
+ out_dir = strip_dir_names_re.sub ('', in_dir)
+ i = 0
+ while i < len(dirs):
+ if exclude_dirs_re.search (dirs[i]):
+ del dirs[i]
+ else:
+ if os.path.islink (os.path.join (in_dir, dirs[i])):
+ files.append (dirs[i])
+ i += 1
+ if not strip_dir_names_re.match (os.path.basename (in_dir)):
+ for t in target_dirs:
+ p = os.path.join (t, out_dir)
+ if not os.path.isdir (p):
+ os.mkdir (p)
+ if not process_dirs_re.search (in_dir):
+ continue
+ for f in files:
+ if exclude_files_re.match (f):
+ continue
+ in_file = os.path.join (in_dir, f)
+ if find_files_re.match (f):
+ found_files.append (in_file)
+ if os.path.islink (in_file): # all symlinks are assumed to be relative and to point to files in the input trees
+ link_path = new_link_path (os.path.normpath (os.readlink (in_file)), in_dir, do_strip_dir_names_re)
+ for t in target_dirs:
+ os.symlink (link_path, os.path.join (t, out_dir, f))
+ elif process_files_re.match (f):
+ for t in target_dirs:
+ os.link (in_file, os.path.join (t, out_dir, f))
+ return found_files
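+# A sketch of a mirroring run, using the arguments passed by www_post.py:
+# with strip_dir_names = 'out-www' and targets = ['offline', 'online'],
+# input/test/out-www/foo.png gets hardlinked to both
+# <outdir>/offline-root/input/test/foo.png and
+# <outdir>/online-root/input/test/foo.png.  HTML files do not match
+# process_files there, so they are not linked; they are returned in the
+# files list and written per-target by add_html_footer.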
--- /dev/null
+#!@PYTHON@
+
+## This is www_post.py. This script is the main stage
+## of the toplevel GNUmakefile's local-WWW-post target.
+
+# USAGE: www_post PACKAGE_NAME TOPLEVEL_VERSION BUILDSCRIPT-DIR OUTDIR TARGETS
+# Please call me from the top of the source directory.
+
+import sys
+import os
+import re
+
+package_name, package_version, buildscript_dir, outdir, targets = sys.argv[1:]
+targets = targets.split (' ')
+outdir = os.path.normpath (outdir)
+doc_dirs = ['input', 'Documentation', outdir]
+target_pattern = os.path.join (outdir, '%s-root')
+
+static_files = {os.path.join (outdir, 'index.html'):
+ '''<META HTTP-EQUIV="refresh" content="0;URL=Documentation/index.html">
+<html><body>Redirecting to the documentation index...</body></html>\n''',
+ os.path.join (outdir, 'VERSION'):
+ package_version + '\n' }
+
+for f in static_files.keys():
+ open (f, 'w').write (static_files[f])
+
+
+sys.path.append (buildscript_dir)
+import mirrortree
+import add_html_footer
+import langdefs
+
+sys.stderr.write ("Mirroring...\n")
+html_list = mirrortree.hardlink_tree (input_roots = doc_dirs,
+ process_dirs = outdir,
+ strip_dir_names = outdir,
+ exclude_dirs = '(' +
+ '|'.join ([l.code for l in langdefs.LANGUAGES]) +
+ r'|po|out|\w*?-root)(/|$)',
+ process_files = r'.*?\.(?:midi|pdf|png|txt|ly|signature)$|VERSION',
+ exclude_files = r'lily-[0-9a-f]+.*\.pdf',
+ target_pattern = target_pattern,
+ targets = targets)
+html_dict = add_html_footer.build_pages_dict (html_list)
+strip_re = re.compile (outdir + '/')
+for t in targets:
+ sys.stderr.write ("Processing HTML pages for %s target...\n" % t)
+ add_html_footer.add_html_footer (
+ package_name = package_name,
+ package_version = package_version,
+ target = t,
+ mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs',
+ pages_dict = html_dict,
+ out_root = target_pattern % t,
+ name_filter = lambda s: strip_re.sub ('', s))
+++ /dev/null
-#!@PYTHON@
-
-"""
-Print a nice footer. add the top of the ChangeLog file (up to the ********)
-"""
-import re
-import sys
-import os
-import time
-import string
-import getopt
-
-index_url=''
-top_url=''
-changelog_file=''
-content_negotiation = False
-package_name = ''
-package_version = ''
-
-mail_address = '(address unknown)'
-try:
- mail_address= os.environ['MAILADDRESS']
-except KeyError:
- pass
-
-mail_address_url= 'mailto:' + mail_address
-if re.search ("http://", mail_address):
- mail_address_url = mail_address
-
-webmaster= mail_address
-try:
- webmaster= os.environ['WEBMASTER']
-except KeyError:
- pass
-
-header_file = ''
-footer_file = ''
-default_header = r"""
-"""
-
-
-#wiki_base = 'http://afavant.elte.hu/lywiki/'
-wiki_base = None
-
-
-default_footer = r"""<hr>Please take me <a href=@INDEX@>back to the index</a>
-of @PACKAGE_NAME@
-"""
-
-built = r'''
-<div style="background-color: #e8ffe8; padding: 2; border: #c0ffc0 1px solid;">
-%(wiki_string)s
-<p>
-<font size="-1">
-This page is for %(package_name)s-%(package_version)s (%(branch_str)s). <br>
-</font>
-<address><font size="-1">
-Report errors to <a href="%(mail_address_url)s">%(mail_address)s</a>.</font></address>
-</p>
-</div>
-'''
-
-
-
-
-def help ():
- sys.stdout.write (r"""Usage: add-html-footer [OPTIONS]... HTML-FILE
-Add header, footer and top of ChangLog file (up to the ********) to HTML-FILE
-
-Options:
- --changelog=FILE use FILE as ChangeLog [ChangeLog]
- --content-negotiation strip .html and .png from urls
- --footer=FILE use FILE as footer
- --header=FILE use FILE as header
- -h, --help print this help
- --index=URL set homepage to URL
- --name=NAME set package_name to NAME
- --version=VERSION set package version to VERSION
-
-""")
- sys.exit (0)
-
-(options, files) = getopt.getopt(sys.argv[1:], 'h', [
- 'changelog=', 'footer=', 'header=', 'help', 'index=',
- 'name=', 'content-negotiation', 'version='])
-
-for opt in options:
- o = opt[0]
- a = opt[1]
- if o == '--changelog':
- changelog_file = a
- elif o == '--content-negotiation':
- content_negotiation = True
- elif o == '--footer':
- footer_file = a
- elif o == '--header':
- header_file = a
- elif o == '-h' or o == '--help':
- help ()
- # urg, this is top!
- elif o == '--index':
- index_url = a
- elif o == '--name':
- package_name = a
- elif o == '--version':
- package_version = a
- else:
- raise 'unknown opt ', o
-
-
-def compose (default, file):
- s = default
- if file:
- s = open (file).read ()
- return s
-
-localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
-
-if os.path.basename (index_url) != "index.html":
- index_url = os.path.join (index_url , "index.html")
-top_url = os.path.dirname (index_url) + "/"
-
-header = compose (default_header, header_file)
-
-# compose (default_footer, footer_file)
-footer = built
-header_tag = '<!-- header_tag -->'
-footer_tag = '<!-- footer_tag -->'
-
-# Python < 1.5.2 compatibility
-#
-# On most platforms, this is equivalent to
-#`normpath(join(os.getcwd()), PATH)'. *Added in Python version 1.5.2*
-if os.path.__dict__.has_key ('abspath'):
- abspath = os.path.abspath
-else:
- def abspath (path):
- return os.path.normpath (os.path.join (os.getcwd (), path))
-
-
-def remove_self_ref (s):
- self_url = abspath (os.getcwd () + '/' + f)
- #sys.stderr.write ('url0: %s\n' % self_url)
-
- # self_url = re.sub ('.*?' + string.lower (package_name) + '[^/]*/',
- # '', self_url)
- # URG - this only works when source tree is unpacked in `src/' dir
- # For some reason, .*? still eats away
- # /home/fred/usr/src/lilypond-1.5.14/Documentation/user/out-www/lilypond/
- # instead of just
- #
- # /home/fred/usr/src/lilypond-1.5.14/
- #
- # Tutorial.html
- self_url = re.sub ('.*?src/' + string.lower (package_name) + '[^/]*/',
- '', self_url)
-
- #sys.stderr.write ('url1: %s\n' % self_url)
-
- #urg, ugly lily-specific toplevel index hack
- self_url = re.sub ('.*topdocs/out-www/index.html', 'index.html', self_url)
- #sys.stderr.write ('url2: %s\n' % self_url)
-
- # ugh, python2.[12] re is broken.
- ## pat = re.compile ('.*?(<a href="[\./]*' + self_url + '#?[^"]*">)([^<]*)(</a>)', re.DOTALL)
- pat = re.compile ('[.\n]*?(<a href="[\./]*' + self_url + '#?[^"]*">)([^<]*)(</a>)')
- m = pat.search (s)
- while m:
- #sys.stderr.write ('self: %s\n' % m.group (2))
- s = s[:m.start (1)] + m.group (2) + s[m.end (3):]
- m = pat.search (s)
- return s
-
-def do_file (f):
- if os.path.islink (f):
- return
-
- s = open (f).read()
- s = re.sub ('%', '%%', s)
-
-
- if re.search (header_tag, s) == None:
- body = '<BODY BGCOLOR=WHITE TEXT=BLACK>'
- s = re.sub ('(?i)<body>', body, s)
- if re.search ('(?i)<BODY', s):
- s = re.sub ('(?i)<body[^>]*>', body + header, s, 1)
- elif re.search ('(?i)<html', s):
- s = re.sub ('(?i)<html>', '<HTML>' + header, s, 1)
- else:
- s = header + s
-
- s = header_tag + '\n' + s
-
- if re.search ('(?i)<!DOCTYPE', s) == None:
- doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
- s = doctype + s
-
- if re.search (footer_tag, s) == None:
- if re.search ('(?i)</body', s):
- s = re.sub ('(?i)</body>', footer_tag + footer + '\n' + '</BODY>', s, 1)
- elif re.search ('(?i)</html', s):
- s = re.sub ('(?i)</html>', footer_tag + footer + '\n' + '</HTML>', s, 1)
- else:
- s = s + footer_tag + footer + '\n'
-
- s = i18n (f, s)
-
- #URUGRGOUSNGUOUNRIU
- index = index_url
- top = top_url
- if os.path.basename (f) == "index.html":
- cwd = os.getcwd ()
- if os.path.basename (cwd) == "topdocs":
- index = "index.html"
- top = ""
-
- # don't cause ///////index.html entries in log files.
- # index = "./index.html"
- # top = "./"
-
- versiontup = string.split(package_version, '.')
- branch_str = 'stable-branch'
- if string.atoi ( versiontup[1]) % 2:
- branch_str = 'development-branch'
-
- wiki_page = ('v%s.%s-' % (versiontup[0], versiontup[1]) + f)
- wiki_page = re.sub ('out-www/', '', wiki_page)
- wiki_page = re.sub ('/', '-', wiki_page)
- wiki_page = re.sub (r'\.-', '', wiki_page)
- wiki_page = re.sub ('.html', '', wiki_page)
-
- wiki_string = ''
-
- if wiki_base:
- wiki_string = (r'''<a href="%(wiki_base)s%(wiki_page)s">Read </a> comments on this page, or
- <a href="%(wiki_base)s%(wiki_page)s?action=edit">add</a> one.''' %
- { 'wiki_base': wiki_base,
- 'wiki_page': wiki_page})
-
- subst = globals ()
- subst.update (locals())
- s = s % subst
-
- # urg
- # maybe find first node?
- fallback_web_title = '-- --'
-
- # ugh, python2.[12] re is broken.
- #m = re.match ('.*?<title>\(.*?\)</title>', s, re.DOTALL)
- m = re.match ('[.\n]*?<title>([.\n]*?)</title>', s)
- if m:
- fallback_web_title = m.group (1)
- s = re.sub ('@WEB-TITLE@', fallback_web_title, s)
-
- s = remove_self_ref (s)
-
- # remove info's annoying's indication of referencing external document
- s = re.sub (' \((lilypond|lilypond-internals|music-glossary)\)</a>',
- '</a>', s)
-
- if not os.path.islink (f):
- open (f, 'w').write (s)
-
-
-
-localedir = 'out/locale'
-try:
- import gettext
- gettext.bindtextdomain ('newweb', localedir)
- gettext.textdomain ('newweb')
- _ = gettext.gettext
-except:
- def _ (s):
- return s
-underscore = _
-
-C = 'site'
-LANGUAGES = (
- (C, 'English'),
- ('nl', 'Nederlands'),
- ('fr', 'French')
- )
-
-language_available = _ ("Other languages: %s.") % "%(language_menu)s"
-browser_language = _ ("Using <A HREF='%s'>automatic language selection</A>.") \
- % "%(root_url)sabout/browser-language"
-
-LANGUAGES_TEMPLATE = '''\
-<P>
- %(language_available)s
- <BR>
- %(browser_language)s
-</P>
-''' % vars ()
-
-def file_lang (file, lang):
- (base, ext) = os.path.splitext (file)
- base = os.path.splitext (base)[0]
- if lang and lang != C:
- return base + '.' + lang + ext
- return base + ext
-
-
-def i18n (file_name, page):
- # ugh
- root_url = "/web/"
-
- base_name = os.path.basename (file_name)
-
- lang = C
- m = re.match ('.*[.]([^/.]*).html', file_name)
- if m:
- lang = m.group (1)
-
- # Find available translations of this page.
- available = filter (lambda x: lang != x[0] \
- and os.path.exists (file_lang (file_name, x[0])),
- LANGUAGES)
-
- # Strip .html, .png suffix for auto language selection (content
- # negotiation). The menu must keep the full extension, so do
- # this before adding the menu.
- if content_negotiation:
- page = re.sub ('''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html|.png)(#[^"\']*|)[\'"]''',
- '\\1="\\2\\4"', page)
-
- # Add menu after stripping: must not have autoselection for language menu.
- language_menu = ''
- for (prefix, name) in available:
- lang_file = file_lang (base_name, prefix)
- if language_menu != '':
- language_menu += ', '
- language_menu += '<a href="%(lang_file)s">%(name)s</a>' % vars ()
-
- languages = ''
- if language_menu:
- languages = LANGUAGES_TEMPLATE % vars ()
-
- # Put language menu before '</body>' and '</html>' tags
- if re.search ('(?i)</body', page):
- page = re.sub ('(?i)</body>', languages + '</BODY>', page, 1)
- elif re.search ('(?i)</html', page):
- page = re.sub ('(?i)</html>', languages + '</HTML>', page, 1)
- else:
- page = page + languages
-
- if content_negotiation and language_menu:
- os.symlink (file_name, os.path.splitext (os.path.basename (file_name))[0] + '.en.html')
-
- return page
-
-for f in files:
- do_file (f)
-