X-Git-Url: https://git.donarmstrong.com/?a=blobdiff_plain;f=python%2Fauxiliar%2Fpostprocess_html.py;h=42ccdbe54114d6febe319ab73366434f2ed137ef;hb=4276123897fd7eb1cf9b06ed25f84888a7b8330e;hp=c26d7473e2efb8c3274e2299238c0686c571b95d;hpb=375c0be42300202d2f4b631f5302d817b8a91c60;p=lilypond.git

diff --git a/python/auxiliar/postprocess_html.py b/python/auxiliar/postprocess_html.py
index c26d7473e2..42ccdbe541 100644
--- a/python/auxiliar/postprocess_html.py
+++ b/python/auxiliar/postprocess_html.py
@@ -16,10 +16,12 @@ import langdefs
 non_copied_pages = ['Documentation/out-www/notation-big-page',
                     'Documentation/out-www/internals-big-page',
                     'Documentation/out-www/learning-big-page',
-                    'Documentation/out-www/application-big-page',
+                    'Documentation/out-www/usage-big-page',
                     'Documentation/out-www/music-glossary-big-page',
                     'Documentation/out-www/contributor',
-                    'Documentation/out-www/changes',
+                    'Documentation/out-www/changes-big-page',
+                    'Documentation/out-www/essay-big-page',
+                    'Documentation/out-www/extending-big-page',
                     'Documentation/out-www/snippets',
                     'out-www/examples',
                     'Documentation/topdocs',
@@ -52,11 +54,11 @@ web_footer = '''
 footer_name_version = _doc ('This page is for %(package_name)s-%(package_version)s (%(branch_str)s).')
 # ugh, must not have "_doc" in strings because it is naively replaced with "_" in hacked gettext process
-footer_report_links = _doc ('Your suggestions for the documentation are welcome, please report errors to our bug list.')
+footer_report_links = _doc ('We welcome your aid; please help us by reporting errors to our bug list.')
 
 mail_address = 'http://post.gmane.org/post.php?group=gmane.comp.gnu.lilypond.bugs'
-suggest_Docs_url = 'http://lilypond.org/web/devel/participating/documentation-adding'
+help_us_url = 'http://lilypond.org/help-us.html'
 
 header_tag = '<!-- header_tag -->'
 header_tag_re = re.compile (header_tag)
@@ -100,19 +102,25 @@ def source_links_replace (m, source_val):
     return 'href="' + os.path.join (source_val, m.group (1)) + '"'
 
 # More hardcoding, yay!
-splitted_docs_re = re.compile ('(Documentation/out-www/(automated-engraving|essay|notation|music-glossary|application|general|learning|snippets))/')
+split_docs_re = re.compile('(Documentation/out-www/(automated-engraving|essay|notation|changes|extending|music-glossary|usage|web|learning|snippets|contributor))/')
+lily_examples_re = re.compile ('(href|src)="(ly-examples/.*?)"')
 lily_snippets_re = re.compile ('(href|src)="([0-9a-f]{2}/lily-.*?)"')
 pictures_re = re.compile ('src="(pictures/.*?)"')
 
 docindex_link_re = re.compile (r'href="index.html"')
-
+manuals_page_link_re = re.compile (r'href="((?:\.\./)+)Documentation/web/manuals')
 
 ## Windows does not support symlinks.
-# This function avoids creating symlinks for splitted HTML manuals
+# This function avoids creating symlinks for split HTML manuals
 # Get rid of symlinks in GNUmakefile.in (local-WWW-post)
 # this also fixes missing PNGs only present in translated docs
-def hack_urls (s, prefix):
-    if splitted_docs_re.match (prefix):
+def hack_urls (s, prefix, target, is_development_branch):
+    depth = (prefix.count ('/') - 1) * '../'
+    # fix css links
+    s = css_re.sub ('' % {'rel': depth}, s)
+    # fix image links
+    if split_docs_re.match (prefix):
+        s = lily_examples_re.sub ('\\1="../\\2"', s)
         s = lily_snippets_re.sub ('\\1="../\\2"', s)
         s = pictures_re.sub ('src="../\\1"', s)
 
@@ -128,7 +136,15 @@ def hack_urls (s, prefix):
     else:
         indexfile = "index"
     s = docindex_link_re.sub ('href="' + rel_link + indexfile + '.html\"', s)
-
+    # make the "return to doc index" work with the online website.
+    if target == 'online':
+        if (('Documentation/contributor' in prefix) or
+            is_development_branch):
+            manuals_page = 'development'
+        else:
+            manuals_page = 'manuals'
+        s = manuals_page_link_re.sub (r'href="../../\1website/%s'
+                                      % manuals_page, s)
     source_path = os.path.join (os.path.dirname (prefix), 'source')
     if not os.path.islink (source_path):
         return s
@@ -139,13 +155,11 @@ body_tag_re = re.compile ('(?i)<body([^>]*)>')
 html_tag_re = re.compile ('(?i)<html([^>]*)>')
 doctype_re = re.compile ('(?i)<!DOCTYPE')
 doctype = '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\n'
-css_re = re.compile ('(?i)<link ([^>]*)href="[^">]*?lilypond.*\.css"([^>]*)>')
+css_re = re.compile ('(?i)<link ([^>]*)href="[^">]*?(lilypond.*\.css)"([^>]*)>')
 end_head_tag_re = re.compile ('(?i)</head>')
 
-css_link = """
-
-
+css_link = """
 """
 
@@ -160,10 +174,8 @@ def add_header (s, prefix):
             if not n:
                 s = header + s
 
-        s = header_tag + '\n' + s
-
     if doctype_re.search (s) == None:
-        s = doctype + s
+        s = doctype + header_tag + '\n' + s
 
     if css_re.search (s) == None:
         depth = (prefix.count ('/') - 1) * '../'
@@ -218,7 +230,7 @@ online_links_re = re.compile ('''(href|src)=['"]\
 ((?!Compiling-from-source.html")[^/][.]*[^.:'"]*)\
 ([.]html)(#[^"']*|)['"]''')
 offline_links_re = re.compile ('href=[\'"]\
-((?!Compiling-from-source.html")[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
+((?!Compiling-from-source.html")(?![.]{2}/contributor)[^/][.]*[^.:\'"]*)([.]html)(#[^"\']*|)[\'"]')
 big_page_name_re = re.compile ('''(.+?)-big-page''')
 
 def process_i18n_big_page_links (match, prefix, lang_ext):
@@ -338,30 +350,36 @@ def process_html_files (package_name = '',
     for prefix, ext_list in pages_dict.items ():
         for lang_ext in ext_list:
             file_name = langdefs.lang_file_name (prefix, lang_ext, '.html')
-            in_f = open (file_name)
-            s = in_f.read()
-            in_f.close()
-
-            s = s.replace ('%', '%%')
-            s = hack_urls (s, prefix)
-            s = add_header (s, prefix)
-
-            ### add footer
-            if footer_tag_re.search (s) == None:
-                if 'general' in file_name:
-                    s = add_footer (s, footer_tag + web_footer)
-                else:
-                    s = add_footer (s, footer_tag + footer)
-
-            available, missing = find_translations (prefix, lang_ext)
-            page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target)
-            # Add menu after stripping: must not have autoselection for language menu.
-            page_flavors = add_menu (page_flavors, prefix, available, target, translation)
-            for k in page_flavors:
-                page_flavors[k][1] = page_flavors[k][1] % subst[page_flavors[k][0]]
-                out_f = open (name_filter (k), 'w')
-                out_f.write (page_flavors[k][1])
-                out_f.close()
+            source_time = os.path.getmtime(file_name)
+            dest_time = 0
+            if os.path.exists(name_filter(file_name)):
+                dest_time = os.path.getmtime(name_filter(file_name))
+            if dest_time < source_time:
+
+                in_f = open (file_name)
+                s = in_f.read()
+                in_f.close()
+
+                s = s.replace ('%', '%%')
+                s = hack_urls (s, prefix, target, bool (int (versiontup[1]) % 2))
+                s = add_header (s, prefix)
+
+                ### add footer
+                if footer_tag_re.search (s) == None:
+                    if 'web' in file_name:
+                        s = add_footer (s, footer_tag + web_footer)
+                    else:
+                        s = add_footer (s, footer_tag + footer)
+
+                available, missing = find_translations (prefix, lang_ext)
+                page_flavors = process_links (s, prefix, lang_ext, file_name, missing, target)
+                # Add menu after stripping: must not have autoselection for language menu.
+                page_flavors = add_menu (page_flavors, prefix, available, target, translation)
+                for k in page_flavors:
+                    page_flavors[k][1] = page_flavors[k][1] % subst[page_flavors[k][0]]
+                    out_f = open (name_filter (k), 'w')
+                    out_f.write (page_flavors[k][1])
+                    out_f.close()
         # if the page is translated, a .en.html symlink is necessary for content negotiation
-        if target == 'online' and ext_list != ['']:
+        if target == 'online' and ext_list != [''] and not os.path.lexists (name_filter (prefix + '.en.html')):
             os.symlink (os.path.basename (prefix) + '.html', name_filter (prefix + '.en.html'))
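
The manuals_page_link_re regex and the new target == 'online' branch in
hack_urls () rewrite each manual's "return to doc index" link so that it
points into the separately built website tree ('development' instead of
'manuals' for the Contributor's Guide and for odd-numbered development
branches). A minimal round trip of that substitution, using the regex exactly
as defined in the patch (the sample href and the fixed 'manuals' page are
illustrative; the script picks the page at run time):

    import re

    manuals_page_link_re = re.compile (r'href="((?:\.\./)+)Documentation/web/manuals')

    s = 'href="../../Documentation/web/manuals.html"'
    # group 1 preserves the page's own '../' run; two more '../' steps
    # climb out of the manual tree into the sibling website/ tree
    print (manuals_page_link_re.sub (r'href="../../\1website/manuals', s))
    # prints: href="../../../../website/manuals.html"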
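The source_time/dest_time comparison added to process_html_files () makes the
postprocessing step incremental: a page is reread and rewritten only when the
generated file is missing or older than its source. The same test in
isolation, as a sketch (the file names are hypothetical, and name_filter ()
is assumed here to be the identity):

    import os

    def needs_rebuild (source, dest):
        # a missing destination counts as infinitely old,
        # matching dest_time = 0 in the patch
        if not os.path.exists (dest):
            return True
        return os.path.getmtime (dest) < os.path.getmtime (source)

    if needs_rebuild ('notation.html', 'out-www/notation.html'):
        pass  # reread the page and redo headers, footers and links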
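Finally, the os.path.lexists () guard on the symlink branch makes reruns
idempotent: os.symlink () raises OSError when the link name already exists,
and lexists () (unlike exists ()) is also true for a dangling symlink, so the
.en.html content-negotiation link is never created twice. A small sketch
under the same assumptions (illustrative file names):

    import os

    prefix = 'notation'
    link = prefix + '.en.html'
    if not os.path.lexists (link):
        # content negotiation serves notation.en.html to English readers
        os.symlink (prefix + '.html', link)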