2 """Tell me who you are!
import warnings

from ConfigParser import SafeConfigParser
from optparse import OptionParser, OptionGroup, OptionConflictError

# First, silence warnings that guard against nothing but are annoying
warnings.simplefilter('ignore', FutureWarning)
warnings.filterwarnings('ignore',
        'Module debian_bundle was already imported.*', UserWarning)

from debian import deb822
import apt  # initializes the "_system" ;)
from apt_pkg import version_compare

# stdlib pieces used throughout this script
import codecs, copy, json, os, re, shutil, subprocess, sys, time
import urllib, urllib2

import pysvn
import numpy as np

from jinja2 import Environment, PackageLoader

from pprint import PrettyPrinter

class AptListsCache(object):
    def __init__(self, cachedir='build/cache',
                 ro_cachedirs=None):
        self.cachedir = cachedir
        if ro_cachedirs is not None:
            self.ro_cachedirs = ro_cachedirs
        else:
            self.ro_cachedirs = []

        # ensure the cache directory exists
        create_dir(self.cachedir)
    def get(self, url, update=False):
        """Look the file up in the cache and return the cached copy,
        downloading it first if necessary.

        Knows how to deal with http:// and svn:// URLs.
        """
        # look whether it is compressed
        cext = url.split('.')[-1]
        if cext in ['gz', 'bz2']:
            target_url = url[:-1 * len(cext) - 1]
        else:
            # assume not compressed
            target_url = url
            cext = None

        # turn url into a filename -- mimic what APT does for
        # its lists in /var/lib/apt/lists/
        tfilename = '_'.join(target_url.split('/')[2:])

        # if we need to download anyway do not search
        if update:
            cfilename = os.path.join(self.cachedir, tfilename)
        else:
            # look for the uncompressed file anywhere in the cache
            cfilename = None
            for cp in [self.cachedir] + self.ro_cachedirs:
                if os.path.exists(os.path.join(cp, tfilename)):
                    cfilename = os.path.join(cp, tfilename)
                    break
            # not found in any cache -- download into our own cachedir
            if cfilename is None:
                cfilename = os.path.join(self.cachedir, tfilename)

        # if an update is needed -- download
        if update or not os.path.exists(cfilename):
            #print 'Caching file from %s' % url

            if url.startswith('svn://'):
                # export from SVN
                pysvn.Client().export(url, cfilename)
            if url.startswith('http://'):
                # download the file
                tmpfile, ignored = urllib.urlretrieve(url)

                # decompress
                decompressor = None
                if cext == 'gz':
                    decompressor = 'gzip'
                elif cext == 'bz2':
                    decompressor = 'bzip2'
                elif cext is not None:
                    raise ValueError(
                        "Don't know how to decompress %s files" % cext)

                if decompressor is not None:
                    if subprocess.call([decompressor, '-d', '-q', '-f',
                                        tmpfile]) != 0:
                        raise RuntimeError(
                            "Something went wrong while decompressing '%s'"
                            % tmpfile)

                    # move decompressed file into cache
                    shutil.move(os.path.splitext(tmpfile)[0], cfilename)

                    # XXX do we need that if explicit filename is provided?
                else:
                    shutil.move(tmpfile, cfilename)

        # open the cached file
        fh = codecs.open(cfilename, 'r', 'utf-8')

        return fh

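# A minimal usage sketch (the URL is hypothetical): the cache transparently
# fetches, decompresses, and hands back a UTF-8 file handle:
#   cache = AptListsCache()
#   fh = cache.get('http://example.org/debian/dists/sid/main/binary-i386/Packages.bz2')
#   for stanza in deb822.Packages.iter_paragraphs(fh):
#       ...
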
def add_pkgfromtaskfile(db, urls):
    cache = AptListsCache()
    pkgs = []

    for url in urls:
        fh = cache.get(url)

        # loop over all stanzas
        for stanza in deb822.Packages.iter_paragraphs(fh):
            if 'Depends' in stanza:
                pkg = stanza['Depends']
            elif 'Recommends' in stanza:
                pkg = stanza['Recommends']
            elif 'Suggests' in stanza:
                pkg = stanza['Suggests']
            else:
                continue

            # account for multiple packages per line
            if pkg.count(','):
                pkgs += [p.strip() for p in pkg.split(',')]
            else:
                pkgs.append(pkg.strip())

    for p in pkgs:
        if not p in db:
            db[p] = get_emptydbentry()

    return db

def get_emptydbentry():
    return {'main': {}}

def import_blendstask(cfg, db, url):
    cache = AptListsCache()
    fh = cache.get(url)

    # figure out blend's task page URL, since they differ from blend to blend
    urlsec = url.split('/')
    blendname = urlsec[-3]
    if blendname == 'debian-med':
        taskpage_url = 'http://debian-med.alioth.debian.org/tasks/'
    elif blendname == 'debian-science':
        taskpage_url = 'http://blends.alioth.debian.org/science/tasks/'
    else:
        raise ValueError('Unknown blend "%s"' % blendname)
    taskpage_url += urlsec[-1]

    for st in deb822.Packages.iter_paragraphs(fh):
        if 'Task' in st:
            task_name = st['Task']
            task = (blendname, task_name, taskpage_url)

        if 'Depends' in st:
            pkg = st['Depends']
        elif 'Recommends' in st:
            pkg = st['Recommends']
        elif 'Suggests' in st:
            pkg = st['Suggests']
        else:
            # print 'Warning: Cannot determine name of prospective package ' \
            #       '... ignoring. Dump follows:'
            # print st
            continue

        # take care of pkg lists
        for p in pkg.split(', '):
            if not p in db:
                print 'Ignoring blend package "%s"' % p
                continue

            info = {}

            # blends info
            info['tasks'] = [task]
            if 'License' in st:
                info['license'] = st['License']
            if 'Responsible' in st:
                info['responsible'] = st['Responsible']

            # pkg description
            if 'Pkg-Description' in st:
                descr = st['Pkg-Description'].split('\n')
                info['description'] = descr[0].strip()
                info['long_description'] = \
                    u'\n'.join(descr[1:])

                # charge the basic property set
                db[p]['main']['description'] = info['description']
                db[p]['main']['long_description'] = info['long_description']
            if 'WNPP' in st:
                db[p]['main']['debian_itp'] = st['WNPP']
            if 'Pkg-URL' in st:
                db[p]['main']['other_pkg'] = st['Pkg-URL']
            if 'Homepage' in st:
                db[p]['main']['homepage'] = st['Homepage']

            # publications
            if 'Published-Title' in st:
                title = st['Published-Title']
                if title.endswith('.'):
                    # strip the trailing dot -- it gets added later
                    pub = {'title': title[:-1]}
                else:
                    pub = {'title': title}
                if 'Published-Authors' in st:
                    pub['authors'] = st['Published-Authors']
                if 'Published-Year' in st:
                    pub['year'] = st['Published-Year']
                if 'Published-In' in st:
                    pub['in'] = st['Published-In']
                if 'Published-URL' in st:
                    pub['url'] = st['Published-URL']
                if 'Published-DOI' in st:
                    pub['doi'] = st['Published-DOI']
                    # need at least one URL
                    if not 'url' in pub:
                        pub['url'] = "http://dx.doi.org/%s" % st['Published-DOI']

                db[p]['main']['publication'] = pub

            if 'Registration' in st:
                db[p]['main']['registration'] = st['Registration']

            if 'Remark' in st:
                # prepend a single space to make it look like a long description
                info['remark'] = convert_longdescr(' ' + st['Remark'])

            # only store if there isn't something already
            if not 'blends' in db[p]:
                db[p]['blends'] = info
            else:
                # just add this task's name and id
                db[p]['blends']['tasks'].append(task)

            # handle pkg name aliases
            if p in cfg.options('blend package aliases'):
                src_entry = db[p].copy()
                # remove original entry
                del db[p]
                # copy the entry into all aliases
                for alias in cfg.get('blend package aliases', p).split():
                    print "Aliasing %s to %s" % (p, alias)
                    db[alias] = copy.deepcopy(src_entry)

    return db

def get_releaseinfo(rurl):
    cache = AptListsCache()
    # root URL of the repository
    baseurl = '/'.join(rurl.split('/')[:-1])
    # get the release file from the cache
    release_file = cache.get(rurl)

    # create parser instance
    rp = deb822.Release(release_file)

    # architectures on this dist
    archs = rp['Architectures'].split()
    components = rp['Components'].split()
    # compile a new codename that also considers the repository label
    # to distinguish between official and unofficial repos.
    label = rp['Label']
    origin = rp['Origin']
    codename = rp['Codename']
    labelcode = '_'.join([rp['Label'], rp['Codename']])

    # cleanup
    release_file.close()

    return {'baseurl': baseurl, 'archs': archs, 'components': components,
            'codename': codename, 'label': label, 'labelcode': labelcode,
            'origin': origin}

def build_pkgsurl(baseurl, component, arch):
    return '/'.join([baseurl, component, 'binary-' + arch, 'Packages.bz2'])

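# e.g. (with a hypothetical repository base URL):
#   build_pkgsurl('http://example.org/debian/dists/sid', 'main', 'i386')
#   -> 'http://example.org/debian/dists/sid/main/binary-i386/Packages.bz2'
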
def import_release(cfg, db, rurl):
    cache = AptListsCache()

    ri = get_releaseinfo(rurl)

    # compile the list of Packages files to parse and parse them
    for c in ri['components']:
        for a in ri['archs']:
            # compile packages URL
            pkgsurl = build_pkgsurl(ri['baseurl'], c, a)

            # retrieve from cache
            packages_file = cache.get(pkgsurl)

            # parse
            for stanza in deb822.Packages.iter_paragraphs(packages_file):
                db = _store_pkg(cfg, db, stanza, ri['origin'], ri['codename'],
                                c, ri['baseurl'])

            # cleanup
            packages_file.close()

    return db

def _store_pkg(cfg, db, st, origin, codename, component, baseurl):
    """Store the information from one package stanza in the DB.

    :Parameters:
      st: package stanza (parsed by deb822)
    """
    pkg = st['Package']

    # only care for known packages
    if not pkg in db:
        # print 'Ignoring NeuroDebian package "%s"' % pkg
        return db

    distkey = (trans_codename(codename, cfg), 'neurodebian-' + codename)

    if distkey in db[pkg]:
        info = db[pkg][distkey]
    else:
        info = {'architecture': []}

    # fill in data
    if not st['Architecture'] in info['architecture']:
        info['architecture'].append(st['Architecture'])
    info['maintainer'] = st['Maintainer']
    if 'Homepage' in st:
        info['homepage'] = st['Homepage']
    info['version'] = st['Version']

    # origin
    info['distribution'] = origin
    info['release'] = codename
    info['component'] = component

    # pool URL of the package
    info['poolurl'] = os.path.dirname(st['Filename'])

    # pkg description
    descr = st['Description'].replace('%', '%%').split('\n')
    info['description'] = descr[0].strip()
    info['long_description'] = u'\n'.join(descr[1:])

    db[pkg][distkey] = info

    # charge the basic property set
    db[pkg]['main']['description'] = info['description']
    db[pkg]['main']['long_description'] = info['long_description']
    if 'Source' in st:
        db[pkg]['main']['sv'] = "%s %s" % (st['Source'], st['Version'])
    else:
        db[pkg]['main']['sv'] = "%s %s" % (st['Package'], st['Version'])
    if 'Homepage' in st:
        db[pkg]['main']['homepage'] = st['Homepage']
    if 'Recommends' in st:
        db[pkg]['main']['recommends'] = st['Recommends']

    return db

def trans_codename(codename, cfg):
    """Translate a known codename into a release description.

    Unknown codenames will simply be returned as is.
    """
    # if we know something, tell
    if codename in cfg.options('release codenames'):
        return cfg.get('release codenames', codename)
    else:
        return codename

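# e.g. given a (hypothetical) config section
#   [release codenames]
#   sid = Debian testing (sid)
# trans_codename('sid', cfg) yields 'Debian testing (sid)', while any
# codename not listed there is passed through unchanged
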
def create_dir(path):
    if os.path.exists(path):
        return

    ps = path.split(os.path.sep)

    for i in range(1, len(ps) + 1):
        p = os.path.sep.join(ps[:i])

        # skip the empty leading component of absolute paths
        if p and not os.path.exists(p):
            os.mkdir(p)

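# NB: functionally close to os.makedirs(path), except that already
# existing intermediate components are silently tolerated
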
def dde_get(url, fail=False):
    # enforce delay to be friendly to DDE
    time.sleep(1)
    try:
        data = json.load(urllib2.urlopen(url + "?t=json"))['r']
        print "SUCCESS:", url
        return data
    except urllib2.HTTPError, e:
        print "NOINFO:", url, type(e)
        return False
    except urllib2.URLError, e:
        print "URLERROR:", url, type(e)
        if fail:
            print "Permanent failure"
            return False
        print "Try again after 30 seconds..."
        time.sleep(30)
        return dde_get(url, fail=True)
    except StopIteration:
        print "NOINFO:", url
        return False
    except Exception, e:
        print "UDD-DOWN?:", url, type(e)
        return False

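# dde_get() hands back the parsed 'r' payload on success and False on any
# failure; a URLError is retried exactly once after a 30 second pause
# (signalled via the `fail` flag) before giving up
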
def nitrc_get(spec, fail=False):
    nitrc_url = 'http://www.nitrc.org/export/site/projects.json.php'
    try:
        # change into this from python 2.6 on
        #data = json.loads(urllib2.urlopen(nitrc_url + '?spec=%s' % spec).read())
        data = json.load(urllib2.urlopen(nitrc_url + '?spec=%s' % spec))
        print "NITRC-SUCCESS:", spec
        return data
    except urllib2.HTTPError, e:
        print "NITRC-NOINFO:", spec, type(e)
        return False
    except urllib2.URLError, e:
        print "NITRC-URLERROR:", spec, type(e)
        if fail:
            print "Permanent failure"
            return False
        print "Try again after 30 seconds..."
        time.sleep(30)
        return nitrc_get(spec, fail=True)

def parse_nitrc(data):
    if data is False:
        return None
    # simplify -- there is only one project in the data
    project = data['projects'][0]
    nitrc_filtered = {'downloads': 0,
                      'id': project['id']}
    for pkg in project['packages']:
        for release in pkg['releases']:
            for f in release['files']:
                nitrc_filtered['downloads'] += f['download_count']
    return nitrc_filtered

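# the excerpt is intentionally tiny -- essentially just the 'downloads'
# total accumulated across all files of all releases of all packages of
# the (single) project in the reply
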
def import_nitrc(cfg, db):
    for p in db.keys():
        if not cfg.has_option("nitrc ids", p):
            continue
        nitrc_spec = cfg.get("nitrc ids", p)
        nitrc_data = nitrc_get(nitrc_spec)
        nitrc_excerpt = parse_nitrc(nitrc_data)
        if nitrc_excerpt is not None:
            db[p]['nitrc'] = nitrc_excerpt
    return db

def import_dde(cfg, db):
    query_url = cfg.get('dde', 'pkgquery_url')
    for p in db.keys():
        # get the freshest
        q = dde_get(query_url + "/packages/all/%s" % p)
        if q:
            # copy all stuff, while preserving non-overlapping information
            for k, v in q.iteritems():
                db[p]['main'][k] = v
            # get latest popcon info for debian and ubuntu
            # cannot use origin field itself, since it is none for few packages
            origin = q['drc'].split()[0]
            if origin == 'ubuntu':
                if 'popcon' in q:
                    db[p]['main']['ubuntu_popcon'] = q['popcon']
                # if we have ubuntu, need to get debian
                q = dde_get(query_url + "/packages/prio-debian-sid/%s" % p)
                if q and 'popcon' in q:
                    db[p]['main']['debian_popcon'] = q['popcon']
            elif origin == 'debian':
                if 'popcon' in q:
                    db[p]['main']['debian_popcon'] = q['popcon']
                # if we have debian, need to get ubuntu
                q = dde_get(query_url + "/packages/prio-ubuntu-precise/%s" % p)
                if q and 'popcon' in q:
                    db[p]['main']['ubuntu_popcon'] = q['popcon']
            else:
                print("Ignoring unknown origin '%s' for package '%s'."
                      % (origin, p))

        # now get info for package from all releases in UDD
        q = dde_get(query_url + "/dist/p:%s" % p)
        if not q:
            continue
        # hold all info about this package per distribution release
        info = {}
        for cp in q:
            distkey = (trans_codename(cp['release'], cfg),
                       "%s-%s" % (cp['distribution'], cp['release']))
            if not distkey in info:
                info[distkey] = cp
                # turn into a list to append others later
                info[distkey]['architecture'] = [info[distkey]['architecture']]
            # accumulate data across multiple archs of the same version
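            # NB: apt_pkg.version_compare(a, b) returns a negative value if
            # version a is older than b, zero if equal, and a positive value
            # if a is newer -- the comparisons below rely on that convention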
            else:
                comp = version_compare(cp['version'],
                                       info[distkey]['version'])
                # found another arch for the same version
                if comp == 0:
                    info[distkey]['architecture'].append(cp['architecture'])
                # found newer version, dump the old ones
                elif comp > 0:
                    info[distkey] = cp
                    # turn into a list to append others later
                    info[distkey]['architecture'] = [info[distkey]['architecture']]
                # simply ignore older versions

        # finally assign the new package data
        for k, v in info.iteritems():
            db[p][k] = v

    return db

def assure_unicode(s):
    """Assure that the argument is unicode.

    Necessary for strings that lack the Pythonic 'u' prefix signalling
    unicode, but are in fact UTF-8 encoded.
    """
    if type(s) is unicode:
        return s
    elif type(s) is str:
        # attempt a regular unicode call and if it fails -- just decode it
        try:
            return unicode(s)
        except UnicodeDecodeError, e:
            return s.decode('utf8')
    else:
        return assure_unicode(str(s))

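# e.g. both of these yield u'caf\xe9' (the second via the
# UnicodeDecodeError fallback):
#   assure_unicode(u'caf\xe9')
#   assure_unicode('caf\xc3\xa9')
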
def convert_longdescr(ld):
    """Convert a plain-text long description into ReST markup.

    yoh: I think all this long description conversion will keep giving
    us problems, since per se there are no strict regulations,
    especially in blends files
    """
    ld = ld.replace('% ', '%% ')
    ld = ld.replace(r'\t', '    ')  # just in case, assuming a tab is 4 spaces
    ld = ld.split('\n')

    re_leadblanks = re.compile("^ *")
    re_itemized = re.compile("^[-o*+] +")
    re_itemized_gr = re.compile("^( *)([-o*+] +)?(.*?)$")
    re_description_gr = re.compile("^( *[^-]+ - )(.*?)$")
    def unwrap_lines(lines):
        out = []
        indent_levels = [-1]
        for l in lines:
            match = re_itemized_gr.search(l).groups()
            if ((len(match[0]) in indent_levels and match[1] is None)
                or (len(match[0]) > max(indent_levels) + 4)) \
                and match[2].strip() != '.':
                # unindented continuation -- append to the previous line
                if not out[-1].endswith(" "):
                    out[-1] += " "
                out[-1] += match[2]
            else:
                out.append(l)
                indent_levels = [len(match[0])]
                if match[1] is not None:
                    indent_levels += [len(match[0]) + len(match[1])]
            if match[2].strip() == '.':
                # reset though if '.'
                indent_levels = [-1]
        return out
    def dedent_withlevel(lines):
        """Dedent `lines`, returning the dedented lines and the amount dedented."""
        nleading = min([re_leadblanks.search(l).span()[1]
                        for l in lines])
        return [l[nleading:] for l in lines], nleading
    def block_lines(ld, level=0):
        # so we got a list of lines
        # dedent all of them first
        ld, level = dedent_withlevel(ld)

        # lets collect them in blocks/paragraphs
        # 1. into paragraphs split by '.'
        blocks, block = [], None

        # next block can begin if
        #  1. a new paragraph is started by a bare '.'
        #  2. it was an itemized list and all items begin with
        #     the same symbol or get further indented accordingly
        # so let's first check if it is an itemized list
        itemized_match = re_itemized.search(ld[0])
        if itemized_match:
            allow_indents = " " * itemized_match.span()[1]

        for l in ld:
            if block is None or l.strip() == '.' \
               or (len(l) and (len(block) and (
                   (l.startswith(' ') and not block[-1].startswith(' '))
                   or
                   (not l.startswith(' ') and block[-1].startswith(' '))))):
                # start a new block
                block = []
                blocks.append(block)
            if l.strip() != '.':
                block.append(l)

        if len(blocks) == 1:
            return blocks[0]
        return [block_lines(b, level+1) for b in blocks if len(b)]
    def blocks_to_rst(bls, level=0):
        # check if this block is an itemized beast
        #itemized_match = re_itemized_gr.search(bls[0][0])
        #if itemized_match:
        #    allow_indents = " "*itemized_match.span()[1]
        out = u''
        for b in bls:
            if isinstance(b, list):
                if len(b) == 1:
                    out += " "*level + b[0] + '\n\n'
                else:
                    out += blocks_to_rst(b, level+1)
            else:
                e = " "*level + b + '\n'
                if not re_itemized.search(b):
                    # plain paragraph line -- terminate with a blank line
                    e += '\n'
                elif len(e) and e[0] == ' ':
                    # strip 1 leading blank
                    e = e[1:]
                out += e
        return out

    ld = unwrap_lines(ld)
    bls = block_lines(ld)
    return blocks_to_rst(bls)

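# NB (summary of the helpers above): unwrap_lines() re-joins hard-wrapped
# continuation lines, block_lines() recursively splits the text into
# (possibly nested) paragraph/item blocks, and blocks_to_rst() renders
# those blocks as indented ReST text
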
def underline_text(text, symbol):
    underline = symbol * len(text)
    return '%s\n%s\n' % (text, underline)

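# e.g. underline_text('Software packages', '=') produces
#   Software packages
#   =================
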
def generate_pkgpage(pkg, cfg, db, template, addenum_dir, extracts_dir):
    # local binding for ease of use
    pkgdb = db[pkg]
    # do nothing if there is not at least the very basic stuff
    if not 'description' in pkgdb['main']:
        return
    title = '**%s** -- %s' % (pkg, pkgdb['main']['description'])
    title = underline_text(title, '*')

    ex_dir = None
    if 'sv' in pkgdb['main']:
        ex_dir = os.path.join(extracts_dir, pkgdb['main']['sv'].split()[0])
        if not os.path.exists(ex_dir):
            ex_dir = None
    long_description = 'Description missing'
    if 'long_description' in pkgdb['main']:
        long_description = convert_longdescr(
            assure_unicode(pkgdb['main']['long_description']))
    page = template.render(
        pkg=pkg,
        title=title,
        long_description=long_description,
        cfg=cfg,
        db=pkgdb,
        fulldb=db,
        extracts_dir=ex_dir)
    # the following can be replaced by something like
    # {% include "sidebar.html" ignore missing %}
    # in the template whenever jinja 2.2 becomes available
    addenum = os.path.join(os.path.abspath(addenum_dir), '%s.rst' % pkg)
    if os.path.exists(addenum):
        page += '\n\n.. include:: %s\n' % addenum
    return page

def store_db(db, filename):
    pp = PrettyPrinter(indent=2)
    f = codecs.open(filename, 'w', 'utf-8')
    f.write(pp.pformat(db))
    f.close()


def read_db(filename):
    # the db is stored as a pretty-printed dict literal
    f = codecs.open(filename, 'r', 'utf-8')
    db = eval(f.read())
    return db

def write_sourceslist(jinja_env, cfg, outdir):
    create_dir(outdir)
    create_dir(os.path.join(outdir, 'lists'))

    repos = {}
    for release in cfg.options('release codenames'):
        if release == 'data':
            # no separate list for the data archive
            continue
        transrel = trans_codename(release, cfg)
        repos[transrel] = []
        for mirror in cfg.options('mirrors'):
            listname = '%s.%s' % (release, mirror)
            repos[transrel].append((mirror, listname))
            lf = open(os.path.join(outdir, 'lists', listname), 'w')
            for rel in ('data', release):
                aptcfg = '%s %s main contrib non-free\n' % (cfg.get('mirrors',
                                                                    mirror),
                                                            rel)
                lf.write('deb %s' % aptcfg)
                lf.write('#deb-src %s' % aptcfg)
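                # each list thus carries one active and one commented entry
                # per suite, e.g. (with a hypothetical mirror URL):
                #   deb http://mirror.example.org/debian data main contrib non-free
                #   #deb-src http://mirror.example.org/debian data main contrib non-free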
            lf.close()

    id2codename = dict([(cfg.get('release backport ids', r), r)
                        for r in cfg.options('release codenames')])
    id2relname = dict([(cfg.get('release backport ids', r), trans_codename(r, cfg))
                       for r in cfg.options('release codenames')])
    mirror2name = dict([(m, cfg.get('mirror names', m))
                        for m in cfg.options('mirrors')])
    mirror2url = dict([(m, cfg.get('mirrors', m))
                       for m in cfg.options('mirrors')])
    srclist_template = jinja_env.get_template('sources_lists.rst')
    sl = open(os.path.join(outdir, 'sources_lists'), 'w')
    sl.write(srclist_template.render(id2codename=id2codename,
                                     id2relname=id2relname,
                                     mirror2name=mirror2name,
                                     mirror2url=mirror2url,
                                     repos=repos))
    sl.close()

def write_mirmonlists(cfg, outdir):
    """Write lists of mirrors in a format suitable for mirmon.

    It will reuse the same 'lists' directory.
    """
    print "I: Composing mirmon lists"
    outdir = os.path.join(outdir, 'lists')
    create_dir(outdir)

    for sec, sep in (('mirrors', ' '),
                     ('mirror names', ' - ')):
        entries = ['%s%s%s' % (mirror, sep, cfg.get(sec, mirror))
                   for mirror in cfg.options('mirrors')]
        f = open(os.path.join(outdir, 'mirmon-%s.txt' % sec.replace(' ', '-')),
                 'w')
        f.write('\n'.join(entries + ['']))
        f.close()

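# the two emitted files differ only in the value column:
# mirmon-mirrors.txt carries "<mirror-id> <url>" lines, while
# mirmon-mirror-names.txt carries "<mirror-id> - <human readable name>"
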
def sort_by_tasks(db):
    tasks = {}
    for pkg in db.keys():
        if not 'blends' in db[pkg]:
            # no blend info
            continue
        blendinfo = db[pkg]['blends']
        if not 'tasks' in blendinfo:
            # no task info in blend data
            continue
        taskinfo = blendinfo['tasks']
        for task in taskinfo:
            taskname = task[1]
            if not taskname in tasks:
                tasks[taskname] = []
            tasks[taskname].append(pkg)
    return tasks

def sort_by_maintainer(db):
    maints = {}
    maint_ids = {}
    for pkg in db.keys():
        maint = None
        pkginfo = db[pkg]
        # start with the blends info
        if 'blends' in pkginfo and 'responsible' in pkginfo['blends']:
            maint = pkginfo['blends']['responsible']
        if not 'main' in db[pkg] and maint is None:
            # no info at all
            continue
        info = db[pkg]['main']
        if not 'maintainer' in info and maint is None:
            # no maintainer info
            continue
        if 'original_maintainer' in info and not info['original_maintainer'] is None:
            maint = info['original_maintainer']
        elif 'maintainer' in info and not info['maintainer'] is None:
            maint = info['maintainer']
        if maint is None:
            # no sane maintainer info
            continue
        # safeguard: <> confuses sphinx and we don't care about different emails
        if '<' in maint:
            maint = maint[:maint.find('<')]
        maint = maint.strip()
        # kick out non-ascii ones (should not be, but too tired to find the bug)
        try:
            codecs.ascii_decode(maint)
        except UnicodeEncodeError:
            continue
        if not maint.lower() in maints:
            maints[maint.lower()] = []
            maint_ids[maint.lower()] = [maint]
        else:
            maint_ids[maint.lower()].append(maint)
        maints[maint.lower()].append(pkg)

    # remove duplicates
    out = {}
    for m in maints:
        out[maint_ids[m][0]] = np.unique(maints[m])

    return out

def sort_by_release(db):
    rels = {}
    for pkg in db.keys():
        for sec in db[pkg]:
            if not isinstance(sec, tuple):
                # only releases are of interest
                continue
            relname = sec[0]
            if not relname in rels:
                rels[relname] = []
            rels[relname].append(pkg)
    # remove duplicates
    for r in rels:
        rels[r] = np.unique(rels[r])
    return rels

def write_pkgpages(jinja_env, cfg, db, outdir, addenum_dir, extracts_dir):
    create_dir(outdir)
    create_dir(os.path.join(outdir, 'pkgs'))
    create_dir(os.path.join(outdir, 'pkglists'))
    # template for individual package listings
    toc_template = jinja_env.get_template('pkgs_toc.rst')
    # the high-level package list overview
    hltoc = codecs.open(os.path.join(outdir, 'pkgs.rst'), 'w', 'utf-8')
    hltoc.write('.. _pkglists:\n\n')
    hltoc.write(underline_text('Software packages', '='))
    defs = [(sort_by_tasks(db), 'By purpose', 'Packages for %s'),
            (sort_by_release(db), 'By release', 'Packages for %s'),
            (sort_by_maintainer(db), 'By maintainer', 'Packages by %s')]
    for def_ in defs:
        # TOC for each thingie
        pkgsdict, sectitle, title_tmpl = def_
        hltoc.write(underline_text(sectitle, '-'))
        ids = pkgsdict.keys()
        ids.sort()
        for id_ in ids:
            label = ('pkgs-%s-%s' % (sectitle, id_)).lower().replace(' ', '_').replace('/', '_')
            # only list packages that are actually in the DB
            filtered_pkgs = [p for p in pkgsdict[id_] if p in db]
            if not len(filtered_pkgs):
                continue
            plist = toc_template.render(
                label=label,
                title=underline_text(title_tmpl % id_, '='),
                pkgs=filtered_pkgs,
                db=db)
            toc = codecs.open(os.path.join(outdir,
                                           'pkglists',
                                           '%s.rst' % label),
                              'w', 'utf-8')
            toc.write(plist)
            toc.close()
            hltoc.write('* :ref:`%s`\n' % label)
        hltoc.write('\n\n')

    # now a complete list of all packages
    hltoc.write(underline_text('Complete list', '-'))
    toc = codecs.open(os.path.join(outdir, 'pkglists', 'pkgs-all.rst'),
                      'w', 'utf-8')
    toc.write(toc_template.render(label='full_pkg_list',
                                  title=underline_text('Complete package list', '='),
                                  pkgs=db.keys(), db=db))
    toc.close()
    hltoc.write('* :ref:`full_pkg_list`\n')
    hltoc.close()

    # and now each individual package page
    pkg_template = jinja_env.get_template('pkg.rst')
    for p in db.keys():
        page = generate_pkgpage(p, cfg, db, pkg_template, addenum_dir,
                                extracts_dir)
        # when no page is available skip this package
        if page is None:
            continue
        pf = codecs.open(os.path.join(outdir, 'pkgs', p + '.rst'), 'w', 'utf-8')
        pf.write(page)
        pf.close()

def prepOptParser(op):
    # use module docstring for help output
    op.usage = "%s [OPTIONS]\n\n" % sys.argv[0] + __doc__

    op.add_option("--db",
                  action="store", type="string", dest="db",
                  default=None,
                  help="Database file to read. Default: None")

    op.add_option("--cfg",
                  action="store", type="string", dest="cfg",
                  default=None,
                  help="Repository config file.")

    op.add_option("-o", "--outdir",
                  action="store", type="string", dest="outdir",
                  default=None,
                  help="Target directory for ReST output. Default: None")

    op.add_option("-r", "--release-url",
                  action="append", dest="release_urls",
                  help="None")

    op.add_option("--pkgaddenum", action="store", dest="addenum_dir",
                  type="string", default=None, help="None")

    op.add_option("--extracts", action="store", dest="extracts_dir",
                  type="string", default=None, help="None")

def main():
    op = OptionParser(version="%prog 0.0.2")
    prepOptParser(op)

    (opts, args) = op.parse_args()

    if len(args) != 1:
        print('There needs to be exactly one command')
        sys.exit(1)

    cmd = args[0]

    if opts.cfg is None:
        print("'--cfg' option is mandatory.")
        sys.exit(1)
    if opts.db is None:
        print("'--db' option is mandatory.")
        sys.exit(1)

    cfg = SafeConfigParser()
    cfg.read(opts.cfg)

    if cmd == 'debug_ld':
        # load the db from file
        db = read_db(opts.db)

        for p in db.keys():
        #for p in ['dtitk', 'psychopy', 'psytoolkit', 'ginkgo-cadx', 'gridengine-master', 'cctools']:
            if not 'long_description' in db[p]['main']:
                continue
            ld = db[p]['main']['long_description']
            print ">>>>>>>>> ", p
            print ld
            print "----"
            print convert_longdescr(ld)
        return

    # load existing db, unless renewal is requested
    if cmd == 'updatedb':
        db = {}
        if cfg.has_option('packages', 'select taskfiles'):
            db = add_pkgfromtaskfile(db, cfg.get('packages',
                                                 'select taskfiles').split())

        # add additional package names from config file
        if cfg.has_option('packages', 'select names'):
            for p in cfg.get('packages', 'select names').split():
                if not p in db:
                    db[p] = get_emptydbentry()

        # get info from task files
        if cfg.has_option('packages', 'prospective'):
            for url in cfg.get('packages', 'prospective').split():
                db = import_blendstask(cfg, db, url)

        # parse NeuroDebian repository
        if cfg.has_option('neurodebian', 'releases'):
            for rurl in cfg.get('neurodebian', 'releases').split():
                db = import_release(cfg, db, rurl)

        # collect package information from DDE
        db = import_dde(cfg, db)
        # get info from NITRC
        db = import_nitrc(cfg, db)
        # store the updated db
        store_db(db, opts.db)
        return

    # load the db from file
    db = read_db(opts.db)

    # fire up jinja
    jinja_env = Environment(loader=PackageLoader('neurodebian', 'templates'))

    # generate package pages and TOC and write them to files
    write_pkgpages(jinja_env, cfg, db, opts.outdir, opts.addenum_dir,
                   opts.extracts_dir)

    write_sourceslist(jinja_env, cfg, opts.outdir)

    write_mirmonlists(cfg, opts.outdir)


if __name__ == "__main__":
    main()