- s = gulp_file (f)
-
- if changelog_file:
- changes = gulp_file (changelog_file)
- # urg?
- #m = re.search ('^\\\\*\\\\*', changes)
- m = re.search (r'\*\*\*', changes)
- if m:
- changes = changes[:m.start (0)]
- s = re.sub ('top_of_ChangeLog', '<pre>\n'+ changes + '\n</pre>\n', s)
-
- if re.search (header_tag, s) == None:
- body = '<BODY BGCOLOR=WHITE TEXT=BLACK>'
- s = re.sub ('(?i)<body>', body, s)
- if re.search ('(?i)<BODY', s):
- s = re.sub ('(?i)<body[^>]*>', body + header, s, 1)
- elif re.search ('(?i)<html', s):
- s = re.sub ('(?i)<html>', '<HTML>' + header, s, 1)
- else:
- s = header + s
-
- s = header_tag + '\n' + s
-
- if re.search ('(?i)<!DOCTYPE', s) == None:
- doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
- s = doctype + s
-
- if re.search (footer_tag, s) == None:
- s = s + footer_tag + '\n'
-
- if re.search ('(?i)</body', s):
- s = re.sub ('(?i)</body>', footer + '</BODY>', s, 1)
- elif re.search ('(?i)</html', s):
- s = re.sub ('(?i)</html>', footer + '</HTML>', s, 1)
- else:
- s = s + footer
-
-
- #URUGRGOUSNGUOUNRIU
- index = index_url
- top = top_url
- if os.path.basename (f) == "index.html":
- cwd = os.getcwd ()
- if os.path.basename (cwd) == "topdocs":
- index = "index.html"
- top = ""
-
- # don't cause ///////index.html entries in log files.
- # index = "./index.html"
- # top = "./"
-
-
- versiontup = string.split(package_version, '.')
- branch_str = 'stable-branch'
- if string.atoi ( versiontup[1]) % 2:
- branch_str = 'development-branch'
-
- s = re.sub ('@INDEX@', index, s)
- s = re.sub ('@TOP@', top, s)
- s = re.sub ('@PACKAGE_NAME@', package_name, s)
- s = re.sub ('@PACKAGE_VERSION@', package_version, s)
- s = re.sub ('@WEBMASTER@', webmaster, s)
- s = re.sub ('@GCOS@', gcos, s)
- s = re.sub ('@LOCALTIME@', localtime, s)
- s = re.sub ('@MAILADDRESS@', mail_address, s)
- s = re.sub ('@BRANCH@', branch_str, s)
-
- # ugh, python2.[12] re is broken.
- #pat = re.compile ('.*?<!--\s*(@[^@]*@)\s*=\s*([^\s]*)\s*-->', re.DOTALL)
- pat = re.compile ('[.\n]*?<!--\s*(@[^@]*@)\s*=\s*([^\s]*)\s*-->')
- m = pat.search (s)
- while m:
- at_var = m.group (1)
- at_val = m.group (2)
- sys.stderr.write ('at: %s -> %s\n' % (at_var, at_val))
- s = re.sub (at_var, at_val, s)
- m = pat.search (s)
-
- # urg
- # maybe find first node?
- fallback_web_title = '-- --'
-
- # ugh, python2.[12] re is broken.
- #m = re.match ('.*?<title>\(.*?\)</title>', s, re.DOTALL)
- m = re.match ('[.\n]*?<title>([.\n]*?)</title>', s)
- if m:
- fallback_web_title = m.group (1)
- s = re.sub ('@WEB-TITLE@', fallback_web_title, s)
-
- s = remove_self_ref (s)
-
- open (f, 'w').write (s)
-
+ s = open (f).read()
+ s = re.sub ('%', '%%', s)
+
+
+ if re.search (header_tag, s) == None:
+ body = '<BODY BGCOLOR=WHITE TEXT=BLACK>'
+ s = re.sub ('(?i)<body>', body, s)
+ if re.search ('(?i)<BODY', s):
+ s = re.sub ('(?i)<body[^>]*>', body + header, s, 1)
+ elif re.search ('(?i)<html', s):
+ s = re.sub ('(?i)<html>', '<HTML>' + header, s, 1)
+ else:
+ s = header + s
+
+ s = header_tag + '\n' + s
+
+ if re.search ('(?i)<!DOCTYPE', s) == None:
+ doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
+ s = doctype + s
+
+ if re.search (footer_tag, s) == None:
+ if re.search ('(?i)</body', s):
+ s = re.sub ('(?i)</body>', footer_tag + footer + '\n' + '</BODY>', s, 1)
+ elif re.search ('(?i)</html', s):
+ s = re.sub ('(?i)</html>', footer_tag + footer + '\n' + '</HTML>', s, 1)
+ else:
+ s = s + footer_tag + footer + '\n'
+
+ s = i18n (f, s)
+
+ #URUGRGOUSNGUOUNRIU
+ index = index_url
+ top = top_url
+ if os.path.basename (f) == "index.html":
+ cwd = os.getcwd ()
+ if os.path.basename (cwd) == "topdocs":
+ index = "index.html"
+ top = ""
+
+ # don't cause ///////index.html entries in log files.
+ # index = "./index.html"
+ # top = "./"
+
+ versiontup = string.split(package_version, '.')
+ branch_str = 'stable-branch'
+ if string.atoi ( versiontup[1]) % 2:
+ branch_str = 'development-branch'
+
+ wiki_page = ('v%s.%s-' % (versiontup[0], versiontup[1]) + f)
+ wiki_page = re.sub ('out-www/', '', wiki_page)
+ wiki_page = re.sub ('/', '-', wiki_page)
+ wiki_page = re.sub (r'\.-', '', wiki_page)
+ wiki_page = re.sub ('.html', '', wiki_page)
+
+ wiki_string = ''
+
+ if wiki_base:
+ wiki_string = (r'''<a href="%(wiki_base)s%(wiki_page)s">Read </a> comments on this page, or
+ <a href="%(wiki_base)s%(wiki_page)s?action=edit">add</a> one.''' %
+ { 'wiki_base': wiki_base,
+ 'wiki_page': wiki_page})
+
+ subst = globals ()
+ subst.update (locals())
+ s = s % subst
+
+ # urg
+ # maybe find first node?
+ fallback_web_title = '-- --'
+
+ # ugh, python2.[12] re is broken.
+ #m = re.match ('.*?<title>\(.*?\)</title>', s, re.DOTALL)
+ m = re.match ('[.\n]*?<title>([.\n]*?)</title>', s)
+ if m:
+ fallback_web_title = m.group (1)
+ s = re.sub ('@WEB-TITLE@', fallback_web_title, s)
+
+ s = remove_self_ref (s)
+
+ # remove info's annoying's indication of referencing external document
+ s = re.sub (' \((lilypond|lilypond-internals|music-glossary)\)</a>',
+ '</a>', s)
+
+ open (f, 'w').write (s)
+
+
+
+# --- i18n setup for the generated website pages -----------------------------
+# Directory holding compiled gettext catalogs for the 'newweb' domain.
+localedir = 'out/locale'
+try:
+ import gettext
+ gettext.bindtextdomain ('newweb', localedir)
+ gettext.textdomain ('newweb')
+ _ = gettext.gettext
+except:
+ # Best-effort fallback: if gettext is unavailable, translation is the
+ # identity function.  NOTE(review): the bare except also swallows any
+ # other error from the gettext setup — presumably intentional, confirm.
+ def _ (s):
+ return s
+# Keep a stable alias, since '_' is conventionally rebound elsewhere.
+underscore = _
+
+# 'site' marks the untranslated (English) page set — the "C locale" of the
+# website.  LANGUAGES maps language-file infix -> human-readable menu label.
+C = 'site'
+LANGUAGES = (
+ (C, 'English'),
+ ('nl', 'Nederlands'),
+ ('fr', 'French')
+ )
+
+# These strings keep '%(...)s' placeholders for a *later* substitution pass:
+# the outer %-format here only fills in the literal placeholder text.
+language_available = _ ("Other languages: %s.") % "%(language_menu)s"
+browser_language = _ ("Using <A HREF='%s'>automatic language selection</A>.") \
+ % "%(root_url)sabout/browser-language"
+
+# HTML snippet appended to each page when translations exist; expanded again
+# with vars() inside i18n() to fill language_menu/root_url.
+LANGUAGES_TEMPLATE = '''\
+<P>
+ %(language_available)s
+ <BR>
+ %(browser_language)s
+</P>
+''' % vars ()
+
+def file_lang (file, lang):
+ """Return FILE's name with language infix LANG spliced in before the extension.
+
+ Two splitext calls strip any existing language infix (foo.nl.html -> foo),
+ then '.LANG' is re-inserted unless LANG is empty or the default C ('site').
+ """
+ (base, ext) = os.path.splitext (file)
+ # Second splitext drops an existing '.LL' language infix, if any.
+ base = os.path.splitext (base)[0]
+ if lang and lang != C:
+ return base + '.' + lang + ext
+ return base + ext
+
+
+def i18n (file_name, page):
+ """Insert a menu of available translations into PAGE (an HTML string).
+
+ file_name is used to detect this page's language (a '.LL.html' infix)
+ and to probe the filesystem for sibling translations via file_lang().
+ Returns the rewritten page text.
+ """
+ # ugh
+ # Hard-coded site root used by the browser_language template above.
+ root_url = "/web/"
+
+ base_name = os.path.basename (file_name)
+
+ # Detect the page's own language from a '.LL.html' name; default to C.
+ # NOTE(review): the trailing '.html' has an unescaped '.', so it also
+ # matches e.g. 'Xhtml' — probably harmless here, but confirm.
+ lang = C
+ m = re.match ('.*[.]([^/.]*).html', file_name)
+ if m:
+ lang = m.group (1)
+
+ # Find available translations of this page.
+ # (filter() returns a list under Python 2, which this script targets —
+ # it uses string.atoi elsewhere.)
+ available = filter (lambda x: lang != x[0] \
+ and os.path.exists (file_lang (file_name, x[0])),
+ LANGUAGES)
+
+ # Strip .html, .png suffix for auto language selection (content
+ # negotiation). The menu must keep the full extension, so do
+ # this before adding the menu.
+ # NOTE(review): 'content_negotiation' is a global defined elsewhere in
+ # this script — confirm it is always set before i18n() runs.
+ if content_negotiation:
+ page = re.sub ('''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html|.png)(#[^"\']*|)[\'"]''',
+ '\\1="\\2\\4"', page)
+
+ # Add menu after stripping: must not have autoselection for language menu.
+ language_menu = ''
+ for (prefix, name) in available:
+ lang_file = file_lang (base_name, prefix)
+ if language_menu != '':
+ language_menu += ', '
+ language_menu += '<a href="%(lang_file)s">%(name)s</a>' % vars ()
+
+ # Only emit the menu block when at least one translation exists.
+ languages = ''
+ if language_menu:
+ languages = LANGUAGES_TEMPLATE % vars ()
+
+ # Put language menu before '</body>' and '</html>' tags
+ if re.search ('(?i)</body', page):
+ page = re.sub ('(?i)</body>', languages + '</BODY>', page, 1)
+ elif re.search ('(?i)</html', page):
+ page = re.sub ('(?i)</html>', languages + '</HTML>', page, 1)
+ else:
+ # No closing tag at all: append the menu at the end of the page.
+ page = page + languages
+
+ return page