import time
import string
import getopt
-import __main__
-fullname = "unknown"
-changelog_file = ''
-index_file=''
-banner_file = ''
+gcos = "unknown"
+index_url=''
+top_url=''
changelog_file=''
-changes =''
+package_name = ''
package_version = ''
mail_address = '(address unknown)'
except KeyError:
pass
-
webmaster= mail_address
try:
webmaster= os.environ['WEBMASTER']
except KeyError:
pass
+header_file = ''
+footer_file = ''
+default_header = r"""
+"""
+
+
+#wiki_base = 'http://afavant.elte.hu/lywiki/'
+wiki_base = None
-footer_fn = ''
-footer = r"""<hr>Please take me <a href=%s>back to the index</a>
-of %s
-<!-- package name %s>
- <!-- webmaster fields. %s %s>
+default_footer = r"""<hr>Please take me <a href=@INDEX@>back to the index</a>
+of @PACKAGE_NAME@
"""
-builtstr = r"""<hr><font size=-1>
-This page was built from %s-%s by
-<address><br>%s <<a href=\"mailto:%s\">%s</a>>, %s.</address><p></font>"""
+built = r"""
+<div style="background-color: #e8ffe8; padding: 2; border: #c0ffc0 1px solid;">
+%(wiki_string)s
+<p>
+<font size="-1">
+This page is for %(package_name)s-%(package_version)s (%(branch_str)s). <br>
+</font>
+<address><font size="-1">
+Report errors to <<a href="mailto:%(mail_address)s">%(mail_address)s</a>>.</font></address>
+</div>
-package_name = ''
-(options, files) = getopt.getopt(sys.argv[1:], 'c:hp:', [
- 'name=', 'footer=', 'version=',
- 'changelog=', 'help', 'news=', 'index='])
+"""
+
-def gulp_file(f):
+def gulp_file (f):
try:
- i = open(f)
- i.seek (0, 2)
- n = i.tell ()
- i.seek (0,0)
+ i = open(f)
+ i.seek (0, 2)
+ n = i.tell ()
+ i.seek (0,0)
except:
- sys.stderr.write ("can't open file: %s\n" % f)
- return ''
+ sys.stderr.write ("can't open file: %s\n" % f)
+ return ''
s = i.read (n)
if len (s) <= 0:
- sys.stderr.write ("gulped empty file: %s\n" % f)
+ sys.stderr.write ("gulped empty file: %s\n" % f)
i.close ()
return s
def help ():
- sys.stdout.write (r"""Usage: add-html-footer [OPTION]... HTML-FILE
-Add a nice footer, add the top of the ChangLog file (up to the ********)
+ sys.stdout.write (r"""Usage: add-html-footer [OPTIONS]... HTML-FILE
+Add header, footer and top of ChangeLog file (up to the ********) to HTML-FILE
+
Options:
--h, --help print this help
---version package version
---name package_name
---footer footer file.
+ --changelog=FILE use FILE as ChangeLog [ChangeLog]
+ --footer=FILE use FILE as footer
+ --header=FILE use FILE as header
+ -h, --help print this help
+ --index=URL set homepage to URL
+ --name=NAME set package_name to NAME
+ --version=VERSION set package version to VERSION
+
""")
sys.exit (0)
+(options, files) = getopt.getopt(sys.argv[1:], 'h', [
+ 'changelog=', 'footer=', 'header=', 'help', 'index=',
+ 'name=', 'version='])
+
for opt in options:
o = opt[0]
a = opt[1]
- if o == '--news' or o == '--changelog' or o == '-c':
+ if o == '--changelog':
changelog_file = a
- elif o == '--index':
- index_file = a
elif o == '--footer':
- footer_fn = a
- elif o == '--name':
- package_name = a
+ footer_file = a
+ elif o == '--header':
+ header_file = a
elif o == '-h' or o == '--help':
help ()
+	# NOTE: despite its name, --index also determines top_url below.
+ elif o == '--index':
+ index_url = a
+ elif o == '--name':
+ package_name = a
elif o == '--version':
package_version = a
else:
raise 'unknown opt ', o
-def set_vars():
+
+# Look up the user's full name (GCOS field) in the password database.
+def set_gcos ():
+ global gcos
os.environ["CONFIGSUFFIX"] = 'www';
if os.name == 'nt':
import ntpwd
pw = ntpwd.getpwname(os.environ['USERNAME'])
else:
import pwd
- pw = pwd.getpwuid (os.getuid());
+ if os.environ.has_key('FAKEROOTKEY') and os.environ.has_key('LOGNAME'):
+ pw = pwd.getpwnam (os.environ['LOGNAME'])
+ else:
+ pw = pwd.getpwuid (os.getuid())
- f =pw[4]
+ f = pw[4]
f = string.split (f, ',')[0]
- __main__.fullname=f
-set_vars ()
+ gcos = f
+def compose (default, file):
+ s = default
+ if file:
+ s = gulp_file (file)
+ return s
-def footstr(index):
- ft = __main__.footer
+set_gcos ()
+localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
+
+if os.path.basename (index_url) != "index.html":
+ index_url = os.path.join (index_url , "index.html")
+top_url = os.path.dirname (index_url) + "/"
+
+header = compose (default_header, header_file)
+
+# compose (default_footer, footer_file)
+footer = built
+header_tag = '<!-- header_tag -->'
+footer_tag = '<!-- footer_tag -->'
+
+# Python < 1.5.2 compatibility
+#
+# On most platforms, this is equivalent to
+#`normpath(join(os.getcwd(), PATH))'. *Added in Python version 1.5.2*
+if os.path.__dict__.has_key ('abspath'):
+ abspath = os.path.abspath
+else:
+ def abspath (path):
+ return os.path.normpath (os.path.join (os.getcwd (), path))
+
+
+def remove_self_ref (s):
+ self_url = abspath (os.getcwd () + '/' + f)
+ #sys.stderr.write ('url0: %s\n' % self_url)
+
+ # self_url = re.sub ('.*?' + string.lower (package_name) + '[^/]*/',
+ # '', self_url)
+ # URG - this only works when source tree is unpacked in `src/' dir
+ # For some reason, .*? still eats away
+ # /home/fred/usr/src/lilypond-1.5.14/Documentation/user/out-www/lilypond/
+ # instead of just
+ #
+ # /home/fred/usr/src/lilypond-1.5.14/
+ #
+ # Tutorial.html
+ self_url = re.sub ('.*?src/' + string.lower (package_name) + '[^/]*/',
+ '', self_url)
+
+ #sys.stderr.write ('url1: %s\n' % self_url)
+
+ #urg, ugly lily-specific toplevel index hack
+ self_url = re.sub ('.*topdocs/out-www/index.html', 'index.html', self_url)
+ #sys.stderr.write ('url2: %s\n' % self_url)
+
+ # ugh, python2.[12] re is broken.
+ ## pat = re.compile ('.*?(<a href="[\./]*' + self_url + '#?[^"]*">)([^<]*)(</a>)', re.DOTALL)
+ pat = re.compile ('[.\n]*?(<a href="[\./]*' + self_url + '#?[^"]*">)([^<]*)(</a>)')
+ m = pat.search (s)
+ while m:
+ #sys.stderr.write ('self: %s\n' % m.group (2))
+ s = s[:m.start (1)] + m.group (2) + s[m.end (3):]
+ m = pat.search (s)
+ return s
- if footer_fn:
- try:
- ft = open (footer_fn).read ()
- except:
- raise 'oops: ' , footer_fn
+def do_file (f):
+ s = gulp_file (f)
+ s = re.sub ('%', '%%', s)
+
+
+ if re.search (header_tag, s) == None:
+ body = '<BODY BGCOLOR=WHITE TEXT=BLACK>'
+ s = re.sub ('(?i)<body>', body, s)
+ if re.search ('(?i)<BODY', s):
+ s = re.sub ('(?i)<body[^>]*>', body + header, s, 1)
+ elif re.search ('(?i)<html', s):
+ s = re.sub ('(?i)<html>', '<HTML>' + header, s, 1)
+ else:
+ s = header + s
+
+ s = header_tag + '\n' + s
+
+ if re.search ('(?i)<!DOCTYPE', s) == None:
+ doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
+ s = doctype + s
+
+ if re.search (footer_tag, s) == None:
+ s = s + footer_tag + '\n'
+
+ if re.search ('(?i)</body', s):
+ s = re.sub ('(?i)</body>', footer + '</BODY>', s, 1)
+ elif re.search ('(?i)</html', s):
+ s = re.sub ('(?i)</html>', footer + '</HTML>', s, 1)
+ else:
+ s = s + footer
+
+ s = i18n (f, s)
+
+	# Special case: topdocs' own index.html links to itself relatively.
+ index = index_url
+ top = top_url
+ if os.path.basename (f) == "index.html":
+ cwd = os.getcwd ()
+ if os.path.basename (cwd) == "topdocs":
+ index = "index.html"
+ top = ""
+
+ # don't cause ///////index.html entries in log files.
+ # index = "./index.html"
+ # top = "./"
+
+
+ versiontup = string.split(package_version, '.')
+ branch_str = 'stable-branch'
+ if string.atoi ( versiontup[1]) % 2:
+ branch_str = 'development-branch'
+
+ wiki_page = ('v%s.%s-' % (versiontup[0], versiontup[1]) + f)
+ wiki_page = re.sub ('out-www/', '', wiki_page)
+ wiki_page = re.sub ('/', '-', wiki_page)
+ wiki_page = re.sub (r'\.-', '', wiki_page)
+ wiki_page = re.sub ('.html', '', wiki_page)
+
+ wiki_string = ''
+
+ if wiki_base:
+ wiki_string = (r'''<a href="%(wiki_base)s%(wiki_page)s">Read </a> comments on this page, or
+ <a href="%(wiki_base)s%(wiki_page)s?action=edit">add</a> one.''' %
+ { 'wiki_base': wiki_base,
+ 'wiki_page': wiki_page})
+
+ subst = globals ()
+ subst.update (locals())
+ s = s % subst
+
+	# Fall back to a placeholder page title; ideally this would be
+	# taken from the document's first node instead.
+ fallback_web_title = '-- --'
+
+ # ugh, python2.[12] re is broken.
+ #m = re.match ('.*?<title>\(.*?\)</title>', s, re.DOTALL)
+ m = re.match ('[.\n]*?<title>([.\n]*?)</title>', s)
+ if m:
+ fallback_web_title = m.group (1)
+ s = re.sub ('@WEB-TITLE@', fallback_web_title, s)
+
+ s = remove_self_ref (s)
+ # remove info's annoying's indication of referencing external document
+ s = re.sub (' \((lilypond|lilypond-internals|music-glossary)\)</a>',
+ '</a>', s)
- s = ft % (index, package_name, package_name, webmaster, webmaster)
- s = s + builtstr % (package_name, package_version, fullname,
- mail_address, mail_address,
- time.strftime ('%c %Z', time.localtime (time.time ())))
- return s
+ open (f, 'w').write (s)
-banner = footstr (index_file)
-banner_id = '<! banner_id >'
-if changelog_file:
- changes = gulp_file (changelog_file)
- m = re.search ('^\*\*', changes)
- if m:
- changes = changes[:m.start (0)]
+localedir = 'out/locale'
+try:
+ import gettext
+ gettext.bindtextdomain ('newweb', localedir)
+ gettext.textdomain ('newweb')
+ _ = gettext.gettext
+except:
+ def _ (s):
+ return s
+underscore = _
+LANGUAGES = (
+ ('site', 'English'),
+ ('nl', 'Nederlands'),
+ )
-def do_file (s):
- if changelog_file:
- s = re.sub ('top_of_ChangeLog', '<XMP>\n'+ changes + '\n</XMP>\n', s)
+language_available = _ ("Other languages: %s.") % "%(language_menu)s"
+browser_language = _ ("Using <A HREF='%s'>automatic language selection</A>.") \
+ % "%(root_url)sabout/browser-language"
+LANGUAGES_TEMPLATE = '''\
+<P>
+ %(language_available)s
+ <BR>
+ %(browser_language)s
+</P>
+''' % vars ()
- if re.search (banner_id, s) == None:
- s = banner_id + s
- else:
- return s
+def file_lang (file, lang):
+ (base, ext) = os.path.splitext (file)
+ base = os.path.splitext (base)[0]
+ if lang and lang != 'site':
+ return base + '.' + lang + ext
+ return base + ext
- s = re.sub ('(?i)<body>', '<BODY BGCOLOR=WHITE><FONT COLOR=BLACK>', s)
- # do title.
- #s = check_tag ('<body', '', s, 0)
- if re.search ('(?i)</body', s):
- s = re.sub ('(?i)</body>', banner + '</BODY>', s)
- elif re.search ('(?i)</html', s):
- s = re.sub ('(?i)</html>', banner + '</HTML>', s)
- else:
- s = s + banner
- return s
+def i18n (file_name, page):
+ # ugh
+ root_url = "/web/"
-for f in files:
- s = gulp_file (f)
- s = do_file (s)
- open (f, 'w').write (s)
+ base_name = os.path.basename (file_name)
-if 0:
- title = '<HEAD><TITLE>' \
- + package_name + ' -- ' + os.path.basename (os.path.splitext(f)[0]) \
- + '</TITLE></HEAD>'
- s = check_tag ('<title>', title, s, 0)
+ lang = 'site'
+ m = re.match ('.*[.]([^.]*).html', file_name)
+ if m:
+ lang = m.group (1)
- s = check_tag ('<html', '', s, 0)
- if regex.search ('<HTML', s) == -1:
- s = '<HTML>\n' + s
- s = check_tag ('</html>', '</HTML>', s, 1)
+ # Find available translations of this page.
+ available = filter (lambda x: lang != x[0] \
+ and os.path.exists (file_lang (file_name, x[0])),
+ LANGUAGES)
- dump_file (f, s)
+ # Strip .html, .png suffix for auto language selection.
+# page = re.sub ('''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html(#[^"]*)|.png)[\'"]''',
+# '\\1="\\2"', page)
+ # Create language menu.
+ language_menu = ''
+ for (prefix, name) in available:
+ lang_file = file_lang (base_name, prefix)
+ language_menu += '<a href="%(lang_file)s">%(name)s</a>' % vars ()
+
+ languages = ''
+ if language_menu:
+ languages = LANGUAGES_TEMPLATE % vars ()
+
+ return page + languages
+ ## end i18n
+
+for f in files:
+ do_file (f)