lilypond.git blobdiff: python/auxiliar/postprocess_html.py (commit "resolve merge")

diff --git a/python/auxiliar/postprocess_html.py b/python/auxiliar/postprocess_html.py
index 15fcbbec10b7f1992df208ba059c2f0097bd6b57..568ff17e75e42331648135b4423f0f836b16ee8c 100644
--- a/python/auxiliar/postprocess_html.py
+++ b/python/auxiliar/postprocess_html.py
@@ -13,17 +13,21 @@ import langdefs
 
 # This is to try to make the docball not too big with almost duplicate files
 # see process_links()
-non_copied_pages = ['Documentation/user/out-www/lilypond-big-page',
-                    'Documentation/user/out-www/lilypond-internals-big-page',
-                    'Documentation/user/out-www/lilypond-learning-big-page',
-                    'Documentation/user/out-www/lilypond-program-big-page',
-                    'Documentation/user/out-www/music-glossary-big-page',
+non_copied_pages = ['Documentation/out-www/notation-big-page',
+                    'Documentation/out-www/internals-big-page',
+                    'Documentation/out-www/learning-big-page',
+                    'Documentation/out-www/usage-big-page',
+                    'Documentation/out-www/music-glossary-big-page',
+                    'Documentation/out-www/contributor',
+                    'Documentation/out-www/changes-big-page',
+                    'Documentation/out-www/essay-big-page',
+                    'Documentation/out-www/extending-big-page',
+                    'Documentation/out-www/snippets',
                     'out-www/examples',
                     'Documentation/topdocs',
                     'Documentation/bibliography',
                     'Documentation/out-www/THANKS',
                     'Documentation/out-www/DEDICATION',
-                    'Documentation/out-www/devel',
                     'input/']
 
 def _doc (s):
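
Note: non_copied_pages holds path prefixes rather than exact file names. A
minimal sketch of how such a list can be consulted, with a hypothetical
helper name (the real test lives in process_links() and may differ):

    def is_non_copied (prefix):
        # True if this page should not get near-duplicate copies in the docball.
        return any (prefix.startswith (p) for p in non_copied_pages)

    # is_non_copied ('Documentation/out-www/notation-big-page')  -> True
    # is_non_copied ('Documentation/out-www/notation/pitches')   -> False
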
@@ -42,13 +46,19 @@ footer = '''
 </p>
 </div>
 '''
+
+web_footer = '''
+<div class="footer">
+</div>
+'''
+
 footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).')
 # ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process
-footer_report_links = _doc ('Your <a href="%(suggest_Docs_url)s">suggestions for the documentation</a> are welcome, please report errors to our <a href="%(mail_address_url)s">bug list</a>.')
+footer_report_links = _doc ('We welcome your aid; please <a href="%(help_us_url)s">help us</a> by reporting errors to our <a href="%(mail_address_url)s">bug list</a>.')
 
 
 mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs'
-suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding'
+help_us_url = 'http://lilypond.org/help-us.html'
 
 header_tag = '<!-- header_tag -->'
 header_tag_re = re.compile (header_tag)
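
Note: the footer templates use named %-placeholders, so they are presumably
expanded with a substitution dict along these lines (package values here are
illustrative):

    subst = {
        'package_name': 'LilyPond',
        'package_version': '2.13.16',
        'branch_str': 'development-branch',
        'help_us_url': help_us_url,
        'mail_address_url': mail_address,
    }
    line = footer_name_version % subst
    line = footer_report_links % subst
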
@@ -91,43 +101,44 @@ def build_pages_dict (filelist):
 def source_links_replace (m, source_val):
     return 'href="' + os.path.join (source_val, m.group (1)) + '"'
 
-splitted_docs_re = re.compile ('(input/lsr/out-www/lilypond-snippets|\
-Documentation/user/out-www/(lilypond|music-glossary|lilypond-program|\
-lilypond-learning))/')
-
-snippets_ref_re = re.compile (r'href="(\.\./)?lilypond-snippets')
-user_ref_re = re.compile ('href="(?:\.\./)?lilypond\
-(-internals|-learning|-program|(?!-snippets))')
+# More hardcoding: this list of split manuals must be kept in sync by hand.
+splitted_docs_re = re.compile ('(Documentation/out-www/(automated-engraving|essay|notation|changes|extending|music-glossary|usage|web|learning|snippets))/')
+lily_snippets_re = re.compile ('(href|src)="([0-9a-f]{2}/lily-.*?)"')
+pictures_re = re.compile ('src="(pictures/.*?)"')
 
 docindex_link_re = re.compile (r'href="index.html"')
-
+manuals_page_link_re = re.compile (r'href="((?:\.\./)+)Documentation/web/manuals')
 
 ## Windows does not support symlinks.
 # This function avoids creating symlinks for split HTML manuals;
 # get rid of the symlinks in GNUmakefile.in (local-WWW-post).
 # It also fixes missing PNGs that are only present in translated docs.
-def hack_urls (s, prefix):
+def hack_urls (s, prefix, target, is_development_branch):
     if splitted_docs_re.match (prefix):
-        s = re.sub ('(href|src)="(../lily-.*?|.*?[.]png)"', '\\1="../\\2"', s)
-
-    # fix xrefs between documents in different directories ad hoc
-    if 'user/out-www/lilypond' in prefix:
-        s = snippets_ref_re.sub ('href="source/input/lsr/lilypond-snippets', s)
-    elif 'input/lsr' in prefix:
-        s = user_ref_re.sub ('href="source/Documentation/user/lilypond\\1', s)
-    
+        s = lily_snippets_re.sub ('\\1="../\\2"', s)
+        s = pictures_re.sub ('src="../\\1"', s)
+
     # we also need to replace in the lsr, which is already processed above!
-    if 'input/' in prefix or 'Documentation/topdocs' in prefix:
+    if 'input/' in prefix or 'Documentation/topdocs' in prefix or \
+            'Documentation/contributor' in prefix:
         # fix the link from the regtest, lsr and topdoc pages to the doc index 
         # (rewrite prefix to obtain the relative path of the doc index page)
         rel_link = re.sub (r'out-www/.*$', '', prefix)
         rel_link = re.sub (r'[^/]*/', '../', rel_link)
-        if 'input/regression' in prefix:
-            indexfile = "Documentation/devel/index"
+        if 'input/regression' in prefix or 'Documentation/contributor' in prefix:
+            indexfile = "Documentation/devel"
         else:
             indexfile = "index"
         s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s)
-
+    # make the "return to doc index" work with the online website.
+    if target == 'online':
+        if (('Documentation/contributor' in prefix) or
+            is_development_branch):
+            manuals_page = 'development'
+        else:
+            manuals_page = 'manuals'
+        s = manuals_page_link_re.sub (r'href="../../\1website/%s'
+                                      % manuals_page, s)
     source_path = os.path.join (os.path.dirname (prefix), 'source')
     if not os.path.islink (source_path):
         return s
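
Note: the snippet/picture rewrites and the rel_link computation above work
out as follows on concrete inputs (paths are illustrative):

    # lily_snippets_re:  href="a4/lily-d7ad0b25.png" -> href="../a4/lily-d7ad0b25.png"
    # pictures_re:       src="pictures/context.png"  -> src="../pictures/context.png"

    prefix = 'input/regression/out-www/collated-files'
    rel_link = re.sub (r'out-www/.*$', '', prefix)   # 'input/regression/'
    rel_link = re.sub (r'[^/]*/', '../', rel_link)   # '../../'
    # docindex_link_re then turns href="index.html" into
    # href="../../Documentation/devel.html" on this page.
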
@@ -140,9 +151,7 @@ doctype_re = re.compile ('(?i)<!DOCTYPE')
 doctype = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
 css_re = re.compile ('(?i)<link rel="stylesheet" type="text/css" ([^>]*)href="[^">]*?lilypond.*\.css"([^>]*)>')
 end_head_tag_re = re.compile ('(?i)</head>')
-css_link = """    <link rel="stylesheet" type="text/css" title="Patrick McCarty's design" href="%(rel)sDocumentation/lilypond-mccarty.css">
-    <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond.css" title="Andrew Hawryluk's design">
-    <link rel="alternate stylesheet" type="text/css" href="%(rel)sDocumentation/lilypond-blue.css" title="Kurt Kroon's blue design">
+css_link = """    <link rel="stylesheet" type="text/css" title="Default design" href="%(rel)sDocumentation/lilypond-manuals.css">
     <!--[if lte IE 7]>
     <link href="%(rel)sDocumentation/lilypond-ie-fixes.css" rel="stylesheet" type="text/css">
     <![endif]-->
@@ -159,10 +168,8 @@ def add_header (s, prefix):
             if not n:
                 s = header + s
 
-        s = header_tag + '\n' + s
-
         if doctype_re.search (s) == None:
-            s = doctype + s
+            s = doctype + header_tag + '\n' + s
 
         if css_re.search (s) == None:
             depth = (prefix.count ('/') - 1) * '../'
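
Note: given the depth computation above, the stylesheet block presumably
expands like this before being inserted ahead of </head> (prefix is
illustrative):

    prefix = 'Documentation/notation/out-www/notation'
    depth = (prefix.count ('/') - 1) * '../'   # '../../'
    links = css_link % {'rel': depth}
    # -> href="../../Documentation/lilypond-manuals.css", plus the IE fixes.
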
@@ -217,7 +224,7 @@ online_links_re = re.compile ('''(href|src)=['"]\
 ((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\
 ([.]html)(#[^"']*|)['"]''')
 offline_links_re = re.compile ('href=[\'"]\
-((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
+((?!Compiling-from-source.html")(?![.]{2}/contributor)[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
 big_page_name_re = re.compile ('''(.+?)-big-page''')
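
Note: the added (?![.]{2}/contributor) lookahead keeps the offline link
rewriting away from cross-references into the Contributor's Guide
(file names are illustrative):

    offline_links_re.search ('href="../contributor/regressions.html"')  # None
    offline_links_re.search ('href="notation/pitches.html"')            # match
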
 
 def process_i18n_big_page_links (match, prefix, lang_ext):
@@ -225,7 +232,8 @@ def process_i18n_big_page_links (match, prefix, lang_ext):
     if big_page_name:
         destination_path = os.path.normpath (os.path.join (os.path.dirname (prefix),
                                                            big_page_name.group (0)))
-        if not lang_ext in pages_dict[destination_path]:
+        if not (destination_path in pages_dict and
+                lang_ext in pages_dict[destination_path]):
             return match.group (0)
     return 'href="' + match.group (1) + '.' + lang_ext \
         + match.group (2) + match.group (3) + '"'
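
Note: pages_dict (built by build_pages_dict()) maps a page path to the
language extensions that were actually built; the added membership test
avoids a KeyError when the big-page target was not built at all.
Illustrative shape:

    pages_dict = {'Documentation/out-www/notation-big-page': ['', 'de', 'fr']}
    # old: pages_dict['Documentation/out-www/changes-big-page']  -> KeyError
    # new: the 'in pages_dict' guard returns the link unchanged instead.
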
@@ -341,12 +349,15 @@ def process_html_files (package_name = '',
             in_f.close()
 
             s = s.replace ('%', '%%')
-            s = hack_urls (s, prefix)
+            s = hack_urls (s, prefix, target, bool (int (versiontup[1]) % 2))
             s = add_header (s, prefix)
 
             ### add footer
             if footer_tag_re.search (s) == None:
-                s = add_footer (s, footer_tag + footer)
+                if 'web' in file_name:
+                    s = add_footer (s, footer_tag + web_footer)
+                else:
+                    s = add_footer (s, footer_tag + footer)
 
                 available, missing = find_translations (prefix, lang_ext)
                 page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target)
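
Note: the new is_development_branch argument is derived from the minor
version number; LilyPond development releases carry an odd minor version,
stable releases an even one (version tuples illustrative):

    versiontup = ('2', '13', '16')
    bool (int (versiontup[1]) % 2)   # True  -> development branch
    versiontup = ('2', '12', '3')
    bool (int (versiontup[1]) % 2)   # False -> stable branch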