X-Git-Url: https://git.donarmstrong.com/?a=blobdiff_plain;ds=sidebyside;f=buildscripts%2Fpostprocess_html.py;h=e94da797559b224694a7caae817408310f9b0d4d;hb=bd2898b5e700237d4d1d7d934994713b87746182;hp=f4c4797f988a67e218bbc4a54b7942cb868f08a3;hpb=164e8f28f50194a13c1f16de4b8279a17ced424d;p=lilypond.git

diff --git a/buildscripts/postprocess_html.py b/buildscripts/postprocess_html.py
index f4c4797f98..e94da79755 100644
--- a/buildscripts/postprocess_html.py
+++ b/buildscripts/postprocess_html.py
@@ -1,7 +1,8 @@
 #!@PYTHON@
 
 """
-Postprocess HTML files.
+Postprocess HTML files:
+add footer, tweak links, add language selection menu.
 """
 import re
 import os
@@ -32,23 +33,19 @@ header = r"""
 """
 
 footer = '''
-<div class="footer">
-<p>
-%(footer_name_version)s
-<br>
-<address>
-%(footer_report_errors)s
-</address>
-<br>
-%(footer_suggest_docs)s
-</p>
-</div>
+<div class="footer">
+<p>
+%(footer_name_version)s
+<br>
+%(footer_report_links)s
+</p>
+</div>
 '''
 
 footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).')
-footer_report_errors = _doc ('Report errors to <a href="%(mail_address)s">%(mail_address)s</a>.')
 # ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process
-footer_suggest_docs = _doc ('Your <a href="%(suggest_Docs_url)s">suggestions for the documentation</a> are welcome.')
+footer_report_links = _doc ('Your <a href="%(suggest_Docs_url)s">suggestions for the documentation</a> are welcome, please report errors to our <a href="%(mail_address)s">bug list</a>.')
+
 
 mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs'
 suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding'
@@ -94,10 +91,16 @@ def build_pages_dict (filelist):
 
 def source_links_replace (m, source_val):
     return 'href="' + os.path.join (source_val, m.group (1)) + '"'
 
-splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|lilypond-learning))/')
+splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|\
+Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|\
+lilypond-learning))/')
 
 snippets_ref_re = re.compile (r'href="(\.\./)?lilypond-snippets')
-user_ref_re = re.compile (r'href="(?:\.\./)?lilypond(-internals|-learning|-program|(?!-snippets))')
+user_ref_re = re.compile ('href="(?:\.\./)?lilypond\
+(-internals|-learning|-program|(?!-snippets))')
+
+docindex_link_re = re.compile (r'href="index.html"')
+
 
 ## Windows does not support symlinks.
 # This function avoids creating symlinks for splitted HTML manuals
@@ -112,6 +115,18 @@ def hack_urls (s, prefix):
         s = snippets_ref_re.sub ('href="source/input/lsr/lilypond-snippets', s)
     elif 'input/lsr' in prefix:
         s = user_ref_re.sub ('href="source/Documentation/user/lilypond\\1', s)
+
+    # we also need to replace in the lsr, which is already processed above!
+    if 'input/' in prefix or 'Documentation/topdocs' in prefix:
+        # fix the link from the regtest, lsr and topdoc pages to the doc index
+        # (rewrite prefix to obtain the relative path of the doc index page)
+        rel_link = re.sub (r'out-www/.*$', '', prefix)
+        rel_link = re.sub (r'[^/]*/', '../', rel_link)
+        if 'input/regression' in prefix:
+            indexfile = "Documentation/devel"
+        else:
+            indexfile = "index"
+        s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s)
 
     source_path = os.path.join (os.path.dirname (prefix), 'source')
     if not os.path.islink (source_path):
@@ -123,11 +138,21 @@ def hack_urls (s, prefix):
 body_tag_re = re.compile ('(?i)<body([^>]*)>')
 html_tag_re = re.compile ('(?i)<html>')
 doctype_re = re.compile ('(?i)<!DOCTYPE')
 doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">\n'
+css_re = re.compile ('(?i)<link rel="stylesheet" type="text/css" ([^>]*)href="[^">]*?lilypond.*\.css"([^>]*)>')
+end_head_tag_re = re.compile ('(?i)</head>')
+css_link = """
+    <link rel="stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond.css">
+    <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond-blue.css" title="blue">
+    <!--[if lte IE 7]>
+    <link href="%(rel)slilypond-ie-fixes.css" rel="stylesheet" type="text/css">
+    <![endif]-->
+"""
 
-def add_header (s):
-    """Add header (<body> and doctype)"""
+
+def add_header (s, prefix):
+    """Add header (<body>, doctype and CSS)"""
     if header_tag_re.search (s) == None:
-        body = '<BODY BGCOLOR=WHITE TEXT=BLACK>'
+        body = '<body bgcolor="white" text="black">'
         (s, n) = body_tag_re.subn (body + header, s, 1)
         if not n:
             (s, n) = html_tag_re.subn ('<html>' + header, s, 1)
@@ -138,7 +163,11 @@ def add_header (s):
     if doctype_re.search (s) == None:
         s = doctype + s
 
-    return s
+
+    if css_re.search (s) == None:
+        depth = (prefix.count ('/') - 1) * '../'
+        s = end_head_tag_re.sub ((css_link % {'rel': depth}) + '</head>', s)
+    return s
 
 title_tag_re = re.compile ('.*?<title>(.*?)</title>', re.DOTALL)
 AT_web_title_re = re.compile ('@WEB-TITLE@')
@@ -177,18 +206,34 @@ def find_translations (prefix, lang_ext):
             if lang_ext != e:
                 if e in pages_dict[prefix]:
                     available.append (l)
-                elif lang_ext == '' and l.enabled and reduce (operator.and_, [not prefix.startswith (s) for s in non_copied_pages]):
+                elif lang_ext == '' and l.enabled and reduce (operator.and_,
+                                                              [not prefix.startswith (s)
+                                                               for s in non_copied_pages]):
                     # English version of missing translated pages will be written
                     missing.append (e)
     return available, missing
 
-online_links_re = re.compile ('''(href|src)=[\'"]([^/][.]*[^.:\'"]*)(.html|.png)(#[^"\']*|)[\'"]''')
-offline_links_re = re.compile ('''href=[\'"]([^/][.]*[^.:\'"]*)(.html)(#[^"\']*|)[\'"]''')
+online_links_re = re.compile ('''(href|src)=['"]\
+((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\
+([.]html)(#[^"']*|)['"]''')
+offline_links_re = re.compile ('href=[\'"]\
+((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
+big_page_name_re = re.compile ('''(.+?)-big-page''')
+
+def process_i18n_big_page_links (match, prefix, lang_ext):
+    big_page_name = big_page_name_re.match (match.group (1))
+    if big_page_name:
+        destination_path = os.path.normpath (os.path.join (os.path.dirname (prefix),
+                                                           big_page_name.group (0)))
+        if not lang_ext in pages_dict[destination_path]:
+            return match.group (0)
+    return 'href="' + match.group (1) + '.' + lang_ext \
+        + match.group (2) + match.group (3) + '"'
+
 
 def process_links (s, prefix, lang_ext, file_name, missing, target):
     page_flavors = {}
     if target == 'online':
-        # Strip .html, .png suffix for auto language selection (content
+        # Strip .html, suffix for auto language selection (content
        # negotiation).  The menu must keep the full extension, so do
        # this before adding the menu.
        page_flavors[file_name] = \
@@ -196,7 +241,7 @@ def process_links (s, prefix, lang_ext, file_name, missing, target):
     elif target == 'offline':
         # in LANG doc index: don't rewrite .html suffixes
         # as not all .LANG.html pages exist;
-        # the doc index should be translated and contain the right links
+        # the doc index should be translated and contain links with the right suffixes
         if prefix == 'Documentation/out-www/index':
             page_flavors[file_name] = [lang_ext, s]
         elif lang_ext == '':
@@ -205,9 +250,18 @@ def process_links (s, prefix, lang_ext, file_name, missing, target):
                 page_flavors[langdefs.lang_file_name (prefix, e, '.html')] = \
                     [e, offline_links_re.sub ('href="\\1.' + e + '\\2\\3"', s)]
         else:
-            page_flavors[file_name] = \
-                [lang_ext,
-                 offline_links_re.sub ('href="\\1.' + lang_ext + '\\2\\3"', s)]
+            # For saving bandwidth and disk space, we don't duplicate big pages
+            # in English, so we must process translated big pages links differently.
+            if 'big-page' in prefix:
+                page_flavors[file_name] = \
+                    [lang_ext,
+                     offline_links_re.sub \
+                         (lambda match: process_i18n_big_page_links (match, prefix, lang_ext),
+                          s)]
+            else:
+                page_flavors[file_name] = \
+                    [lang_ext,
+                     offline_links_re.sub ('href="\\1.' + lang_ext + '\\2\\3"', s)]
     return page_flavors
 
 def add_menu (page_flavors, prefix, available, target, translation):
@@ -230,7 +284,6 @@ def add_menu (page_flavors, prefix, available, target, translation):
         if language_menu:
             language_available = t (lang_available) % language_menu
             languages = LANGUAGES_TEMPLATE % vars ()
-        # put language menu before '</body>' and '</html>' tags
         page_flavors[k][1] = add_footer (page_flavors[k][1], languages)
 
     return page_flavors
@@ -277,8 +330,7 @@ def process_html_files (package_name = '',
     # so only one '%' formatting pass is needed later
     for e in subst:
         subst[e]['footer_name_version'] = subst[e]['footer_name_version'] % subst[e]
-        subst[e]['footer_report_errors'] = subst[e]['footer_report_errors'] % subst[e]
-        subst[e]['footer_suggest_docs'] = subst[e]['footer_suggest_docs'] % subst[e]
+        subst[e]['footer_report_links'] = subst[e]['footer_report_links'] % subst[e]
 
     for prefix, ext_list in pages_dict.items ():
         for lang_ext in ext_list:
@@ -289,12 +341,12 @@ def process_html_files (package_name = '',
             s = s.replace ('%', '%%')
             s = hack_urls (s, prefix)
-            s = add_header (s)
+            s = add_header (s, prefix)
 
             ### add footer
             if footer_tag_re.search (s) == None:
                 s = add_footer (s, footer_tag + footer)
 
-
+
                 available, missing = find_translations (prefix, lang_ext)
                 page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target)
                 # Add menu after stripping: must not have autoselection for language menu.
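
Note on the doc-index rewrite added to hack_urls (hunk @@ -112,6 +115,18 @@): the two re.sub calls turn the page's build prefix into a relative path back to the documentation index. A minimal standalone sketch of that logic follows; it is not part of the patch, and the helper name rewrite_docindex_links and the sample prefix are illustrative only.

import re

# Matches links to the documentation index, as in the patch.
docindex_link_re = re.compile (r'href="index.html"')

def rewrite_docindex_links (s, prefix):
    # 'input/regression/out-www/collated-files' -> 'input/regression/'
    rel_link = re.sub (r'out-www/.*$', '', prefix)
    # 'input/regression/' -> '../../'  (one '../' per remaining component)
    rel_link = re.sub (r'[^/]*/', '../', rel_link)
    if 'input/regression' in prefix:
        # regtest pages are pointed at the devel index instead
        indexfile = "Documentation/devel"
    else:
        indexfile = "index"
    return docindex_link_re.sub ('href="' + rel_link + indexfile + '.html"', s)

# Hypothetical usage:
#   rewrite_docindex_links ('<a href="index.html">Back</a>',
#                           'input/regression/out-www/collated-files')
# returns '<a href="../../Documentation/devel.html">Back</a>'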