import os

import feedgenerator
from fsdict import FSDict
from urllib import quote_plus
# Constant unlikely to occur in a docname and legal as a filename.
MAGIC_SEPARATOR = '---###---'
def parse_date(datestring):
    """
    Parse ``datestring`` into a datetime.

    Lazily builds a ``dateutil`` parser on first use and memoizes it as an
    attribute of this function, so repeated calls reuse one parser instance.
    """
    try:
        parser = parse_date.parser
    except AttributeError:
        # First call: construct the parser and cache it on the function.
        import dateutil.parser
        parser = dateutil.parser.parser()
        parse_date.parser = parser
    return parser.parse(datestring)
def setup(app):
    """
    Register the feed extension's config values and event handlers.

    see: http://sphinx.pocoo.org/ext/appapi.html
    this is the primary extension point for Sphinx
    """
    from sphinx.application import Sphinx
    if not isinstance(app, Sphinx):
        # Not a real build (e.g. doc-collection tooling); do nothing.
        return
    app.add_config_value('feed_title', '', 'html')
    app.add_config_value('feed_base_url', '', 'html')
    app.add_config_value('feed_description', '', 'html')
    app.add_config_value('feed_filename', 'rss.xml', 'html')

    app.connect('html-page-context', create_feed_item)
    app.connect('build-finished', emit_feed)
    app.connect('builder-inited', create_feed_container)
    app.connect('env-purge-doc', remove_dead_feed_item)
def create_feed_container(app):
    """
    Create lazy filesystem stash for keeping RSS entry fragments, since we
    don't want to store the entire site in the environment (in fact, even if
    we did, it wasn't persisting for some reason.)

    Connected to the 'builder-inited' event.
    """
    global feed_entries
    # Stash fragments next to (not inside) the output dir so a clean build
    # of outdir does not wipe them.
    rss_fragment_path = os.path.realpath(
        os.path.join(app.outdir, '..', 'rss_entry_fragments'))
    feed_entries = FSDict(work_dir=rss_fragment_path)
    app.builder.env.feed_url = app.config.feed_base_url + '/' + \
        app.config.feed_filename
def create_feed_item(app, pagename, templatename, ctx, doctree):
    """
    Here we have access to nice HTML fragments to use in, say, an RSS feed.
    We serialize them to disk so that we get them preserved across builds.

    We also inject useful metadata into the context here.

    Connected to the 'html-page-context' event.
    """
    global feed_entries
    from absolutify_urls import absolutify
    metadata = app.builder.env.metadata.get(pagename, {})

    if 'date' not in metadata:
        return  # don't index dateless articles
    try:
        pub_date = parse_date(metadata['date'])
        # Cache the parsed date back onto the page metadata for templates.
        metadata['pub_date'] = pub_date
    except ValueError as exc:
        # probably a nonsensical date
        app.builder.warn('date parse error: ' + str(exc) + ' in ' + pagename)
        return  # no usable date -> cannot index this page

    # RSS item attributes, w/defaults:
    #     title, link, description, author_email=None,
    #     author_name=None, author_link=None, pubdate=None, comments=None,
    #     unique_id=None, enclosure=None, categories=(), item_copyright=None
    link = app.config.feed_base_url + '/' + ctx['current_page_name'] + ctx['file_suffix']
    item = {
        'title': ctx.get('title'),
        'link': link,
        'unique_id': link,
        # Rewrite relative URLs in the body so they work outside the site.
        'description': absolutify(ctx.get('body'), link),
        'pubdate': pub_date,
    }
    if 'author' in metadata:
        item['author'] = metadata['author']
    feed_entries[nice_name(pagename, pub_date)] = item

    # Now, useful variables to keep in context
    ctx['rss_link'] = app.builder.env.feed_url
    ctx['pub_date'] = pub_date
def remove_dead_feed_item(app, env, docname):
    """
    Remove the stored feed entry for ``docname`` when Sphinx purges the
    document. Connected to the 'env-purge-doc' event.
    """
    global feed_entries
    # nice_name() url-quotes the WHOLE key string, so the '#' characters of
    # MAGIC_SEPARATOR appear as %23 in stored keys. Quote the suffix the
    # same way, otherwise endswith() can never match and dead entries
    # linger forever.
    munged_name = quote_plus(''.join([MAGIC_SEPARATOR, docname]))
    # Snapshot the keys first: deleting from a mapping while iterating it
    # is unsafe.
    for name in list(feed_entries):
        if name.endswith(munged_name):
            del feed_entries[name]
def emit_feed(app, exc):
    """
    Write all accumulated entry fragments out as one RSS feed file in the
    build output directory. Connected to the 'build-finished' event.
    """
    global feed_entries
    title = app.config.feed_title
    if not title:
        # Fall back to the project name when no explicit feed title is set.
        title = app.config.project
    feed_dict = {
        'title': title,
        'link': app.config.feed_base_url,
        'feed_url': app.config.feed_base_url,
        'description': app.config.feed_description,
    }
    if app.config.language:
        feed_dict['language'] = app.config.language
    if app.config.copyright:
        feed_dict['feed_copyright'] = app.config.copyright
    feed = feedgenerator.Rss201rev2Feed(**feed_dict)
    app.builder.env.feed_feed = feed
    # Keys start with the ISO-format date (see nice_name), so a reverse
    # lexicographic sort yields newest-first ordering.
    for key in sorted(feed_entries.keys(), reverse=True):
        feed.add_item(**feed_entries[key])
    outfilename = os.path.join(app.builder.outdir,
                               app.config.feed_filename)
    # 'with' guarantees the handle is closed even if feed.write raises.
    with open(outfilename, 'w') as fp:
        feed.write(fp, 'utf-8')
def nice_name(docname, date):
    """
    we need convenient filenames which incorporate dates for ease of sorting and
    guid for uniqueness, plus will work in the FS without inconvenient
    characters. NB, at the moment, hour of publication is ignored.
    """
    return quote_plus(MAGIC_SEPARATOR.join([date.isoformat(), docname]))