X-Git-Url: https://git.donarmstrong.com/?a=blobdiff_plain;f=stepmake%2Fbin%2Fadd-html-footer.py;h=08ff5ff061c3cd764ef082071b6e4e510e91b895;hb=9e69cb84d6ee5b0a861cd97869b10e3bdf0c833c;hp=d406d3719a65a4e3bfc8f159047336f611220e18;hpb=b07ef7805ea536741e20c3d1b8fc1bc463a844ba;p=lilypond.git

diff --git a/stepmake/bin/add-html-footer.py b/stepmake/bin/add-html-footer.py
index d406d3719a..08ff5ff061 100644
--- a/stepmake/bin/add-html-footer.py
+++ b/stepmake/bin/add-html-footer.py
@@ -10,28 +10,28 @@ import time
 import string
 import getopt
-gcos = "unknown"
 index_url=''
 top_url=''
 changelog_file=''
+content_negotiation = False
 package_name = ''
 package_version = ''
 mail_address = '(address unknown)'
 try:
-    mail_address= os.environ['MAILADDRESS']
+    mail_address= os.environ['MAILADDRESS']
 except KeyError:
-    pass
+    pass
 mail_address_url= 'mailto:' + mail_address
 if re.search ("http://", mail_address):
-    mail_address_url = mail_address
-
+    mail_address_url = mail_address
+
 webmaster= mail_address
 try:
-    webmaster= os.environ['WEBMASTER']
+    webmaster= os.environ['WEBMASTER']
 except KeyError:
-    pass
+    pass
 header_file = ''
 footer_file = ''
@@ -47,7 +47,7 @@ default_footer = r"""
 Please take me back to the index of @PACKAGE_NAME@
 """
-built = r"""
+built = r'''
 %(wiki_string)s

@@ -55,99 +55,69 @@ built = r"""
 This page is for %(package_name)s-%(package_version)s (%(branch_str)s).

-Report errors to <%(mail_address)s>.
+Report errors to %(mail_address)s.

+'''
-"""
-
-def gulp_file (f):
-    try:
-        i = open(f)
-        i.seek (0, 2)
-        n = i.tell ()
-        i.seek (0,0)
-    except:
-        sys.stderr.write ("can't open file: %s\n" % f)
-        return ''
-    s = i.read (n)
-    if len (s) <= 0:
-        sys.stderr.write ("gulped empty file: %s\n" % f)
-    i.close ()
-    return s
 def help ():
-    sys.stdout.write (r"""Usage: add-html-footer [OPTIONS]... HTML-FILE
+    sys.stdout.write (r"""Usage: add-html-footer [OPTIONS]... HTML-FILE
 Add header, footer and top of ChangLog file (up to the ********) to HTML-FILE
 Options:
- --changelog=FILE    use FILE as ChangeLog [ChangeLog]
- --footer=FILE       use FILE as footer
- --header=FILE       use FILE as header
- -h, --help          print this help
- --index=URL         set homepage to URL
- --name=NAME         set package_name to NAME
- --version=VERSION   set package version to VERSION
+ --changelog=FILE        use FILE as ChangeLog [ChangeLog]
+ --content-negotiation   strip .html and .png from urls
+ --footer=FILE           use FILE as footer
+ --header=FILE           use FILE as header
+ -h, --help              print this help
+ --index=URL             set homepage to URL
+ --name=NAME             set package_name to NAME
+ --version=VERSION       set package version to VERSION
 """)
-    sys.exit (0)
+    sys.exit (0)
 (options, files) = getopt.getopt(sys.argv[1:], 'h', [
-    'changelog=', 'footer=', 'header=', 'help', 'index=',
-    'name=', 'version='])
+    'changelog=', 'footer=', 'header=', 'help', 'index=',
+    'name=', 'content-negotiation', 'version='])
 for opt in options:
-    o = opt[0]
-    a = opt[1]
-    if o == '--changelog':
-        changelog_file = a
-    elif o == '--footer':
-        footer_file = a
-    elif o == '--header':
-        header_file = a
-    elif o == '-h' or o == '--help':
-        help ()
-    # urg, this is top!
-    elif o == '--index':
-        index_url = a
-    elif o == '--name':
-        package_name = a
-    elif o == '--version':
-        package_version = a
-    else:
-        raise 'unknown opt ', o
-
-#burp?
-def set_gcos ():
-    global gcos
-    os.environ["CONFIGSUFFIX"] = 'www';
-    if os.name == 'nt':
-        import ntpwd
-        pw = ntpwd.getpwname(os.environ['USERNAME'])
-    else:
-        import pwd
-        if os.environ.has_key('FAKEROOTKEY') and os.environ.has_key('LOGNAME'):
-            pw = pwd.getpwnam (os.environ['LOGNAME'])
-        else:
-            pw = pwd.getpwuid (os.getuid())
-
-    f = pw[4]
-    f = string.split (f, ',')[0]
-    gcos = f
+    o = opt[0]
+    a = opt[1]
+    if o == '--changelog':
+        changelog_file = a
+    elif o == '--content-negotiation':
+        content_negotiation = True
+    elif o == '--footer':
+        footer_file = a
+    elif o == '--header':
+        header_file = a
+    elif o == '-h' or o == '--help':
+        help ()
+    # urg, this is top!
+    elif o == '--index':
+        index_url = a
+    elif o == '--name':
+        package_name = a
+    elif o == '--version':
+        package_version = a
+    else:
+        raise 'unknown opt ', o
+
 def compose (default, file):
-    s = default
-    if file:
-        s = gulp_file (file)
-    return s
+    s = default
+    if file:
+        s = open (file).read ()
+    return s
-set_gcos ()
 localtime = time.strftime ('%c %Z', time.localtime (time.time ()))
 if os.path.basename (index_url) != "index.html":
-    index_url = os.path.join (index_url , "index.html")
+    index_url = os.path.join (index_url , "index.html")
 top_url = os.path.dirname (index_url) + "/"
 header = compose (default_header, header_file)
@@ -162,205 +132,216 @@ footer_tag = ''
 # On most platforms, this is equivalent to
 #`normpath(join(os.getcwd()), PATH)'.
 # *Added in Python version 1.5.2*
 if os.path.__dict__.has_key ('abspath'):
-    abspath = os.path.abspath
 else:
-    def abspath (path):
-        return os.path.normpath (os.path.join (os.getcwd (), path))
-
-
-def remove_self_ref (s):
-    self_url = abspath (os.getcwd () + '/' + f)
-    #sys.stderr.write ('url0: %s\n' % self_url)
-
-    # self_url = re.sub ('.*?' + string.lower (package_name) + '[^/]*/',
-    # '', self_url)
-    # URG - this only works when source tree is unpacked in `src/' dir
-    # For some reason, .*? still eats away
-    # /home/fred/usr/src/lilypond-1.5.14/Documentation/user/out-www/lilypond/
-    # instead of just
-    #
-    # /home/fred/usr/src/lilypond-1.5.14/
-    #
-    # Tutorial.html
-    self_url = re.sub ('.*?src/' + string.lower (package_name) + '[^/]*/',
-                       '', self_url)
-
-    #sys.stderr.write ('url1: %s\n' % self_url)
-
-    #urg, ugly lily-specific toplevel index hack
-    self_url = re.sub ('.*topdocs/out-www/index.html', 'index.html', self_url)
-    #sys.stderr.write ('url2: %s\n' % self_url)
-
-    # ugh, python2.[12] re is broken.
-    ## pat = re.compile ('.*?()([^<]*)()', re.DOTALL)
-    pat = re.compile ('[.\n]*?()([^<]*)()')
-    m = pat.search (s)
-    while m:
-        #sys.stderr.write ('self: %s\n' % m.group (2))
-        s = s[:m.start (1)] + m.group (2) + s[m.end (3):]
-        m = pat.search (s)
-    return s
+    def abspath (path):
+        return os.path.normpath (os.path.join (os.getcwd (), path))
+
+
+def remove_self_ref (s):
+    self_url = abspath (os.getcwd () + '/' + f)
+    #sys.stderr.write ('url0: %s\n' % self_url)
+
+    # self_url = re.sub ('.*?' + string.lower (package_name) + '[^/]*/',
+    # '', self_url)
+    # URG - this only works when source tree is unpacked in `src/' dir
+    # For some reason, .*? still eats away
+    # /home/fred/usr/src/lilypond-1.5.14/Documentation/user/out-www/lilypond/
+    # instead of just
+    #
+    # /home/fred/usr/src/lilypond-1.5.14/
+    #
+    # Tutorial.html
+    self_url = re.sub ('.*?src/' + string.lower (package_name) + '[^/]*/',
+                       '', self_url)
+
+    #sys.stderr.write ('url1: %s\n' % self_url)
+
+    #urg, ugly lily-specific toplevel index hack
+    self_url = re.sub ('.*topdocs/out-www/index.html', 'index.html', self_url)
+    #sys.stderr.write ('url2: %s\n' % self_url)
+
+    # ugh, python2.[12] re is broken.
+    ## pat = re.compile ('.*?()([^<]*)()', re.DOTALL)
+    pat = re.compile ('[.\n]*?()([^<]*)()')
+    m = pat.search (s)
+    while m:
+        #sys.stderr.write ('self: %s\n' % m.group (2))
+        s = s[:m.start (1)] + m.group (2) + s[m.end (3):]
+        m = pat.search (s)
+    return s
 def do_file (f):
-    s = gulp_file (f)
-    s = re.sub ('%', '%%', s)
-
-
-    if re.search (header_tag, s) == None:
-        body = ''
-        s = re.sub ('(?i)', body, s)
-        if re.search ('(?i)]*>', body + header, s, 1)
-        elif re.search ('(?i)', '' + header, s, 1)
-        else:
-            s = header + s
-
-        s = header_tag + '\n' + s
-
-        if re.search ('(?i)\n'
-            s = doctype + s
-
-    if re.search (footer_tag, s) == None:
-        s = s + footer_tag + '\n'
-
-        if re.search ('(?i)', footer + '', s, 1)
-        elif re.search ('(?i)', footer + '', s, 1)
-        else:
-            s = s + footer
-
-    s = i18n (f, s)
-
-    #URUGRGOUSNGUOUNRIU
-    index = index_url
-    top = top_url
-    if os.path.basename (f) == "index.html":
-        cwd = os.getcwd ()
-        if os.path.basename (cwd) == "topdocs":
-            index = "index.html"
-            top = ""
-
-    # don't cause ///////index.html entries in log files.
-    # index = "./index.html"
-    # top = "./"
-
-    versiontup = string.split(package_version, '.')
-    branch_str = 'stable-branch'
-    if string.atoi ( versiontup[1]) % 2:
-        branch_str = 'development-branch'
-
-    wiki_page = ('v%s.%s-' % (versiontup[0], versiontup[1]) + f)
-    wiki_page = re.sub ('out-www/', '', wiki_page)
-    wiki_page = re.sub ('/', '-', wiki_page)
-    wiki_page = re.sub (r'\.-', '', wiki_page)
-    wiki_page = re.sub ('.html', '', wiki_page)
-
-    wiki_string = ''
-
-    if wiki_base:
-        wiki_string = (r'''Read comments on this page, or
-        add one.''' %
-        { 'wiki_base': wiki_base,
-          'wiki_page': wiki_page})
-
-    subst = globals ()
-    subst.update (locals())
-    s = s % subst
-
-    # urg
-    # maybe find first node?
-    fallback_web_title = '-- --'
-
-    # ugh, python2.[12] re is broken.
-    #m = re.match ('.*?\(.*?\)', s, re.DOTALL)
-    m = re.match ('[.\n]*?([.\n]*?)', s)
-    if m:
-        fallback_web_title = m.group (1)
-    s = re.sub ('@WEB-TITLE@', fallback_web_title, s)
-
-    s = remove_self_ref (s)
-
-    # remove info's annoying's indication of referencing external document
-    s = re.sub (' \((lilypond|lilypond-internals|music-glossary)\)',
-               '', s)
-
-    open (f, 'w').write (s)
+    s = open (f).read()
+    s = re.sub ('%', '%%', s)
+
+
+    if re.search (header_tag, s) == None:
+        body = ''
+        s = re.sub ('(?i)', body, s)
+        if re.search ('(?i)]*>', body + header, s, 1)
+        elif re.search ('(?i)', '' + header, s, 1)
+        else:
+            s = header + s
+
+        s = header_tag + '\n' + s
+
+        if re.search ('(?i)\n'
+            s = doctype + s
+
+    if re.search (footer_tag, s) == None:
+        if re.search ('(?i)', footer_tag + footer + '\n' + '', s, 1)
+        elif re.search ('(?i)', footer_tag + footer + '\n' + '', s, 1)
+        else:
+            s = s + footer_tag + footer + '\n'
+
+    s = i18n (f, s)
+
+    #URUGRGOUSNGUOUNRIU
+    index = index_url
+    top = top_url
+    if os.path.basename (f) == "index.html":
+        cwd = os.getcwd ()
+        if os.path.basename (cwd) == "topdocs":
+            index = "index.html"
+            top = ""
+
+    # don't cause ///////index.html entries in log files.
+    # index = "./index.html"
+    # top = "./"
+
+    versiontup = string.split(package_version, '.')
+    branch_str = 'stable-branch'
+    if string.atoi ( versiontup[1]) % 2:
+        branch_str = 'development-branch'
+
+    wiki_page = ('v%s.%s-' % (versiontup[0], versiontup[1]) + f)
+    wiki_page = re.sub ('out-www/', '', wiki_page)
+    wiki_page = re.sub ('/', '-', wiki_page)
+    wiki_page = re.sub (r'\.-', '', wiki_page)
+    wiki_page = re.sub ('.html', '', wiki_page)
+
+    wiki_string = ''
+
+    if wiki_base:
+        wiki_string = (r'''Read comments on this page, or
+        add one.''' %
+        { 'wiki_base': wiki_base,
+          'wiki_page': wiki_page})
+
+    subst = globals ()
+    subst.update (locals())
+    s = s % subst
+
+    # urg
+    # maybe find first node?
+    fallback_web_title = '-- --'
+
+    # ugh, python2.[12] re is broken.
+    #m = re.match ('.*?\(.*?\)', s, re.DOTALL)
+    m = re.match ('[.\n]*?([.\n]*?)', s)
+    if m:
+        fallback_web_title = m.group (1)
+    s = re.sub ('@WEB-TITLE@', fallback_web_title, s)
+
+    s = remove_self_ref (s)
+
+    # remove info's annoying's indication of referencing external document
+    s = re.sub (' \((lilypond|lilypond-internals|music-glossary)\)',
+               '', s)
+
+    open (f, 'w').write (s)
 localedir = 'out/locale'
 try:
-    import gettext
-    gettext.bindtextdomain ('newweb', localedir)
-    gettext.textdomain ('newweb')
-    _ = gettext.gettext
+    import gettext
+    gettext.bindtextdomain ('newweb', localedir)
+    gettext.textdomain ('newweb')
+    _ = gettext.gettext
 except:
-    def _ (s):
-        return s
+    def _ (s):
+        return s
 underscore = _
-
+C = 'site'
 LANGUAGES = (
-    ('site', 'English'),
-    ('nl', 'Nederlands'),
-    )
+    (C, 'English'),
+    ('nl', 'Nederlands'),
+    ('fr', 'French')
+    )
 language_available = _ ("Other languages: %s.") % "%(language_menu)s"
 browser_language = _ ("Using automatic language selection.") \
-    % "%(root_url)sabout/browser-language"
+    % "%(root_url)sabout/browser-language"
 LANGUAGES_TEMPLATE = '''\

- %(language_available)s
-
- %(browser_language)s
+ %(language_available)s
+
+ %(browser_language)s

 ''' % vars ()
 def file_lang (file, lang):
-    (base, ext) = os.path.splitext (file)
-    base = os.path.splitext (base)[0]
-    if lang and lang != 'site':
-        return base + '.' + lang + ext
-    return base + ext
+    (base, ext) = os.path.splitext (file)
+    base = os.path.splitext (base)[0]
+    if lang and lang != C:
+        return base + '.' + lang + ext
+    return base + ext
 def i18n (file_name, page):
-    # ugh
-    root_url = "/web/"
-
-    base_name = os.path.basename (file_name)
-
-    lang = 'site'
-    m = re.match ('.*[.]([^.]*).html', file_name)
-    if m:
-        lang = m.group (1)
-
-    # Find available translations of this page.
-    available = filter (lambda x: lang != x[0] \
-                        and os.path.exists (file_lang (file_name, x[0])),
-                        LANGUAGES)
-
-    # Strip .html, .png suffix for auto language selection.
-# page = re.sub ('''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html(#[^"]*)|.png)[\'"]''',
-# '\\1="\\2"', page)
-
-    # Create language menu.
-    language_menu = ''
-    for (prefix, name) in available:
-        lang_file = file_lang (base_name, prefix)
-        language_menu += '%(name)s' % vars ()
-
-    languages = ''
-    if language_menu:
-        languages = LANGUAGES_TEMPLATE % vars ()
-
-    return page + languages
-    ## end i18n
+    # ugh
+    root_url = "/web/"
+
+    base_name = os.path.basename (file_name)
+
+    lang = C
+    m = re.match ('.*[.]([^/.]*).html', file_name)
+    if m:
+        lang = m.group (1)
+
+    # Find available translations of this page.
+    available = filter (lambda x: lang != x[0] \
+                        and os.path.exists (file_lang (file_name, x[0])),
+                        LANGUAGES)
+
+    # Strip .html, .png suffix for auto language selection (content
+    # negotiation). The menu must keep the full extension, so do
+    # this before adding the menu.
+    if content_negotiation:
+        page = re.sub ('''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html|.png)(#[^"\']*|)[\'"]''',
+                       '\\1="\\2\\4"', page)
+
+    # Add menu after stripping: must not have autoselection for language menu.
+    language_menu = ''
+    for (prefix, name) in available:
+        lang_file = file_lang (base_name, prefix)
+        if language_menu != '':
+            language_menu += ', '
+        language_menu += '%(name)s' % vars ()
+
+    languages = ''
+    if language_menu:
+        languages = LANGUAGES_TEMPLATE % vars ()
+
+    # Put language menu before '' and '' tags
+    if re.search ('(?i)', languages + '', page, 1)
+    elif re.search ('(?i)', languages + '', page, 1)
+    else:
+        page = page + languages
+
+    return page
 for f in files:
-    do_file (f)
+    do_file (f)
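
The practical effect of the new --content-negotiation option is easiest to see on concrete links. The sketch below is not part of the patch; it reuses the substitution pattern that the patched i18n () applies, with invented sample markup, to show which URLs get their .html/.png suffix stripped (relative links, with any #anchor preserved) and which are left alone (absolute paths and external links).

    # Illustration only (not from the patch): exercise the re.sub that i18n ()
    # runs when content_negotiation is enabled.  The pattern is copied from
    # the added lines above; the sample markup is made up.
    import re

    def strip_suffixes (page):
        return re.sub ('''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html|.png)(#[^"\']*|)[\'"]''',
                       '\\1="\\2\\4"', page)

    sample = '''<a href="tutorial.html">Tutorial</a>
    <a href="tutorial.html#first-steps">First steps</a>
    <img src="lily-logo.png">
    <a href="/cgi-bin/index.html">absolute path, left alone</a>
    <a href="http://www.lilypond.org/index.html">external link, left alone</a>'''

    print strip_suffixes (sample)
    # tutorial.html             -> href="tutorial"
    # tutorial.html#first-steps -> href="tutorial#first-steps"
    # lily-logo.png             -> src="lily-logo"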
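
Likewise, the renamed default-language constant C = 'site' and the translation lookup are easier to follow with a file name in mind. The snippet below is only an illustration with invented file names; file_lang () is copied verbatim from the added lines of the patch, and shows how i18n () maps a page to the .LL.html siblings it probes when building the language menu.

    # Illustration only (not from the patch): file_lang () as added by the
    # patch, applied to invented file names.
    import os

    C = 'site'

    def file_lang (file, lang):
        # strip up to two extensions (e.g. ".nl.html"), then re-attach
        # ".LL.html" for a translation or plain ".html" for the site language
        (base, ext) = os.path.splitext (file)
        base = os.path.splitext (base)[0]
        if lang and lang != C:
            return base + '.' + lang + ext
        return base + ext

    print file_lang ('out-www/tutorial.html', 'nl')   # out-www/tutorial.nl.html
    print file_lang ('out-www/tutorial.nl.html', C)   # out-www/tutorial.html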