2 """Tell me who you are!
9 from ConfigParser import SafeConfigParser
10 from optparse import OptionParser, OptionGroup, OptionConflictError
12 # Let's first assure no guarding (but annoying) warnings
14 warnings.simplefilter('ignore', FutureWarning)
15 warnings.filterwarnings('ignore',
16 'Module debian_bundle was already imported.*', UserWarning)
18 from debian import deb822
19 import apt # initializes the "_system" ;)
20 from apt_pkg import version_compare
34 from jinja2 import Environment, PackageLoader
36 from pprint import PrettyPrinter
# NOTE(review): this view of the file is elided (original line numbers are
# embedded and several lines are missing); comments only annotate visible code.
# Cache of downloaded APT list/task files kept under `cachedir`, with optional
# additional read-only cache directories consulted on lookup.
39 class AptListsCache(object):
40 def __init__(self, cachedir='build/cache',
43 self.cachedir = cachedir
45 if not ro_cachedirs is None:
46 self.ro_cachedirs = ro_cachedirs
48 self.ro_cachedirs = []
# ensure the writable cache directory exists
51 create_dir(self.cachedir)
53 def get(self, url, update=False):
54 """Looks in the cache if the file is there and takes the cached one.
55 Otherwise it is downloaded first.
57 Knows how to deal with http:// and svn:// URLs.
62 # look whether it is compressed
63 cext = url.split('.')[-1]
64 if cext in ['gz', 'bz2']:
65 target_url = url[:-1 * len(cext) -1]
67 # assume not compressed
71 # turn url into a filename -- mimic what APT does for
73 tfilename = '_'.join(target_url.split('/')[2:])
75 # if we need to download anyway do not search
77 cfilename = os.path.join(self.cachedir, tfilename)
79 # look for the uncompressed file anywhere in the cache
81 for cp in [self.cachedir] + self.ro_cachedirs:
82 if os.path.exists(os.path.join(cp, tfilename)):
83 cfilename = os.path.join(cp, tfilename)
88 cfilename = os.path.join(self.cachedir, tfilename)
91 # if update needed -- download
93 #print 'Caching file from %s' % url
95 if url.startswith('svn://'):
97 pysvn.Client().export(url, cfilename)
98 if url.startswith('http://'):
# NOTE(review): local name `tempfile` shadows the stdlib module here
100 tempfile, ignored = urllib.urlretrieve(url)
105 decompressor = 'gzip'
107 decompressor = 'bzip2'
112 "Don't know how to decompress %s files" \
115 if not decompressor is None:
116 if subprocess.call([decompressor, '-d', '-q', '-f',
118 raise RuntimeError, \
119 "Something went wrong while decompressing '%s'" \
122 # move decompressed file into cache
123 shutil.move(os.path.splitext(tempfile)[0], cfilename)
125 # XXX do we need that if explicit filename is provided?
# hand back a UTF-8 decoding reader over the cached file
129 fh = codecs.open(cfilename, 'r', 'utf-8')
# Seed `db` with an empty entry for every package name referenced in the
# Depends/Recommends/Suggests fields of the given task files.
# NOTE(review): lines are elided in this view (fh/pkgs setup not shown).
134 def add_pkgfromtaskfile(db, urls):
135 cache = AptListsCache()
141 # loop over all stanzas
142 for stanza in deb822.Packages.iter_paragraphs(fh):
143 if stanza.has_key('Depends'):
144 pkg = stanza['Depends']
145 elif stanza.has_key('Recommends'):
146 pkg = stanza['Recommends']
147 elif stanza.has_key('Suggests'):
148 pkg = stanza['Suggests']
152 # account for multiple packages per line
154 pkgs += [p.strip() for p in pkg.split(',')]
156 pkgs.append(pkg.strip())
159 if not db.has_key(p):
160 db[p] = get_emptydbentry()
# Build the initial (empty) db entry structure assigned to newly seen
# packages (see callers in add_pkgfromtaskfile / main).  Body elided here.
164 def get_emptydbentry():
# Import package information from a single blends task file at `url` into
# `db`, tagging each package with a (blend, task, taskpage_url) tuple and
# copying license/description/publication metadata from the stanza.
# NOTE(review): several lines are elided in this view.
167 def import_blendstask(cfg, db, url):
168 cache = AptListsCache()
172 # figure out blend's task page URL, since they differ from blend to blend
173 urlsec = url.split('/')
174 blendname = urlsec[-3]
175 if blendname == 'debian-med':
176 taskpage_url = 'http://debian-med.alioth.debian.org/tasks/'
177 elif blendname == 'debian-science':
178 taskpage_url = 'http://blends.alioth.debian.org/science/tasks/'
180 raise ValueError('Unknown blend "%s"' % blendname)
181 taskpage_url += urlsec[-1]
183 for st in deb822.Packages.iter_paragraphs(fh):
184 if st.has_key('Task'):
185 task_name = st['Task']
186 task = (blendname, task_name, taskpage_url)
188 if st.has_key('Depends'):
190 elif st.has_key('Recommends'):
191 pkg = st['Recommends']
192 elif st.has_key('Suggests'):
195 # print 'Warning: Cannot determine name of prospective package ' \
196 # '... ignoring. Dump follows:'
200 # take care of pkg lists
201 for p in pkg.split(', '):
202 if not db.has_key(p):
203 print 'Ignoring blend package "%s"' % p
209 info['tasks'] = [task]
210 if st.has_key('License'):
211 info['license'] = st['License']
212 if st.has_key('Responsible'):
213 info['responsible'] = st['Responsible']
216 if st.has_key('Pkg-Description'):
217 descr = st['Pkg-Description'].split('\n')
218 info['description'] = descr[0].strip()
219 info['long_description'] = \
220 u'\n'.join(descr[1:])
222 # charge the basic property set
223 db[p]['main']['description'] = info['description']
224 db[p]['main']['long_description'] = info['long_description']
225 if st.has_key('WNPP'):
226 db[p]['main']['debian_itp'] = st['WNPP']
227 if st.has_key('Pkg-URL'):
228 db[p]['main']['other_pkg'] = st['Pkg-URL']
229 if st.has_key('Homepage'):
230 db[p]['main']['homepage'] = st['Homepage']
233 if st.has_key('Published-Title'):
234 title = st['Published-Title']
236 # strip trailing dot -- added later
237 pub = {'title': title[:-1]}
239 pub = {'title': title}
240 if st.has_key('Published-Authors'):
241 pub['authors'] = st['Published-Authors']
242 if st.has_key('Published-Year'):
243 pub['year'] = st['Published-Year']
244 if st.has_key('Published-In'):
245 pub['in'] = st['Published-In']
246 if st.has_key('Published-URL'):
247 pub['url'] = st['Published-URL']
248 if st.has_key('Published-DOI'):
249 pub['doi'] = st['Published-DOI']
250 # need at least one URL
251 if not pub.has_key('url'):
252 pub['url'] = "http://dx.doi.org/%s" % st['Published-DOI']
254 db[p]['main']['publication'] = pub
257 if st.has_key('Registration'):
258 db[p]['main']['registration'] = st['Registration']
261 if st.has_key('Remark'):
262 # prepend a single space to make it look like a long description
263 info['remark'] = convert_longdescr(' ' + st['Remark'])
265 # only store if there isn't something already
266 if not db[p].has_key('blends'):
267 db[p]['blends'] = info
269 # just add this task's name and id
270 db[p]['blends']['tasks'].append(task)
272 # handle pkg name aliases
273 if p in cfg.options('blend package aliases'):
274 src_entry = db[p].copy()
275 # remove original entry
277 # copy the entry into all aliases
278 for alias in cfg.get('blend package aliases', p).split():
279 print "Aliasing %s to %s" % (p, alias)
280 db[alias] = copy.deepcopy(src_entry)
# Fetch and parse the APT Release file at `rurl`; returns a dict of
# repository metadata (baseurl, archs, components, codename, label,
# labelcode, ...).  The return literal is truncated in this view.
285 def get_releaseinfo(rurl):
286 cache = AptListsCache()
287 # root URL of the repository
288 baseurl = '/'.join(rurl.split('/')[:-1])
289 # get the release file from the cache
290 release_file = cache.get(rurl)
292 # create parser instance
293 rp = deb822.Release(release_file)
295 # architectures on this dist
296 archs = rp['Architectures'].split()
297 components = rp['Components'].split()
298 # compile a new codename that also considers the repository label
299 # to distinguish between official and unofficial repos.
301 origin = rp['Origin']
302 codename = rp['Codename']
303 labelcode = '_'.join([rp['Label'], rp['Codename']])
308 return {'baseurl': baseurl, 'archs': archs, 'components': components,
309 'codename': codename, 'label': label, 'labelcode': labelcode,
def build_pkgsurl(baseurl, component, arch):
    """Assemble the URL of the bzip2-compressed ``Packages`` file for the
    given repository base URL, component, and architecture."""
    parts = (baseurl, component, 'binary-%s' % arch, 'Packages.bz2')
    return '/'.join(parts)
# Import all packages of the NeuroDebian release at `rurl` into `db` by
# walking every component/architecture Packages file of that release.
317 def import_release(cfg, db, rurl):
318 cache = AptListsCache()
320 ri = get_releaseinfo(rurl)
322 # compile the list of Packages files to parse and parse them
323 for c in ri['components']:
324 for a in ri['archs']:
325 # compile packages URL
326 pkgsurl = build_pkgsurl(ri['baseurl'], c, a)
328 # retrieve from cache
329 packages_file = cache.get(pkgsurl)
332 for stanza in deb822.Packages.iter_paragraphs(packages_file):
333 db = _store_pkg(cfg, db, stanza, ri['origin'], ri['codename'], c, ri['baseurl'])
336 packages_file.close()
# Store one Packages stanza `st` for an already-known package into `db`
# under a per-distribution key; accumulates architectures, and also charges
# the package's 'main' property set (description, sv, homepage, recommends).
# NOTE(review): several lines are elided in this view.
340 def _store_pkg(cfg, db, st, origin, codename, component, baseurl):
347 # only care for known packages
348 if not db.has_key(pkg):
349 # print 'Ignoring NeuroDebian package "%s"' % pkg
352 distkey = (trans_codename(codename, cfg), 'neurodebian-' + codename)
354 if db[pkg].has_key(distkey):
355 info = db[pkg][distkey]
357 info = {'architecture': []}
360 if not st['Architecture'] in info['architecture']:
361 info['architecture'].append(st['Architecture'])
362 info['maintainer'] = st['Maintainer']
363 if st.has_key('Homepage'):
364 info['homepage'] = st['Homepage']
365 info['version'] = st['Version']
368 info['distribution'] = origin
369 info['release'] = codename
370 info['component'] = component
373 info['poolurl'] = '/'.join([os.path.dirname(st['Filename'])])
# escape '%' since descriptions end up in %-formatted templates later
376 descr = st['Description'].replace('%', '%%').split('\n')
377 info['description'] = descr[0].strip()
378 info['long_description'] = u'\n'.join(descr[1:])
380 db[pkg][distkey] = info
382 # charge the basic property set
383 db[pkg]['main']['description'] = info['description']
384 db[pkg]['main']['long_description'] = info['long_description']
385 if st.has_key('Source'):
386 db[pkg]['main']['sv'] = "%s %s" % (st['Source'], st['Version'])
388 db[pkg]['main']['sv'] = "%s %s" % (st['Package'], st['Version'])
389 if st.has_key('Homepage'):
390 db[pkg]['main']['homepage'] = st['Homepage']
391 if st.has_key('Recommends'):
392 db[pkg]['main']['recommends'] = st['Recommends']
# Map a release codename to its human-readable description from the
# '[release codenames]' config section; fallback branch elided in this view.
397 def trans_codename(codename, cfg):
398 """Translate a known codename into a release description.
400 Unknown codenames will simply be returned as is.
402 # if we know something, tell
403 if codename in cfg.options('release codenames'):
404 return cfg.get('release codenames', codename)
def create_dir(path):
    """Create directory `path`, including any missing parent directories.

    A no-op if the directory already exists.
    """
    if os.path.exists(path):
        return
    # os.makedirs replaces the original component-by-component mkdir loop
    # with the equivalent stdlib call
    os.makedirs(path)
# Fetch JSON from a DDE (Debian Data Export) URL and return its 'r' member;
# on URLError it retries once (`fail=True` guards the recursion).
# NOTE(review): retry/return branches are elided in this view.
422 def dde_get(url, fail=False):
423 # enforce delay to be friendly to DDE
426 data = json.load(urllib2.urlopen(url+"?t=json"))['r']
427 print "SUCCESS:", url
429 except urllib2.HTTPError, e:
430 print "NOINFO:", url, type(e)
432 except urllib2.URLError, e:
433 print "URLERROR:", url, type(e)
435 print "Permanant failure"
437 print "Try again after 30 seconds..."
439 return dde_get(url, fail=True)
440 except (StopIteration):
444 print "UDD-DOWN?:", url, type(e)
# Query the NITRC project-export JSON for `spec`; on URLError it retries
# once (`fail=True` guards the recursion).  Branches elided in this view.
448 def nitrc_get(spec, fail=False):
449 nitrc_url = 'http://www.nitrc.org/export/site/projects.json.php'
451 # change into this from python 2.6 on
452 #data = json.loads(urllib2.urlopen(nitrc_url + '?spec=%s' % spec).read())
453 data = json.load(urllib2.urlopen(nitrc_url + '?spec=%s' % spec))
454 print "NITRC-SUCCESS:", spec
455 except urllib2.HTTPError, e:
456 print "NITRC-NOINFO:", spec, type(e)
458 except urllib2.URLError, e:
459 print "NITRC-URLERROR:", spec, type(e)
461 print "Permanant failure"
463 print "Try again after 30 seconds..."
465 return nitrc_get(spec, fail=True)
# Reduce a NITRC JSON payload to aggregate statistics: sums the
# download_count over all files of all releases of the (single) project.
469 def parse_nitrc(data):
472 # simplify -- there is only one project in the data
473 project = data['projects'][0]
474 nitrc_filtered = {'downloads': 0,
476 for pkg in project['packages']:
477 for release in pkg['releases']:
478 for file in release['files']:
479 nitrc_filtered['downloads'] += file['download_count']
480 return nitrc_filtered
# For every package with a configured NITRC id, fetch and attach the
# filtered NITRC statistics under db[p]['nitrc'].  Loop header elided.
483 def import_nitrc(cfg, db):
485 if not cfg.has_option("nitrc ids", p):
487 nitrc_spec = cfg.get("nitrc ids", p)
488 nitrc_data = nitrc_get(nitrc_spec)
489 nitrc_excerpt = parse_nitrc(nitrc_data)
490 if not nitrc_excerpt is None:
491 db[p]['nitrc'] = nitrc_excerpt
# Enrich `db` via DDE: per-package data, popcon for Debian and Ubuntu
# (fetching the "other" distribution's data as needed), and per-release
# info from UDD with architecture accumulation by version comparison.
# NOTE(review): many lines are elided in this view.
495 def import_dde(cfg, db):
496 query_url = cfg.get('dde', 'pkgquery_url')
499 q = dde_get(query_url + "/packages/all/%s" % p)
501 # copy all stuff, while preserving non-overlapping information
502 for k, v in q.iteritems():
504 # get latest popcon info for debian and ubuntu
505 # cannot use origin field itself, since it is none for few packages
507 origin = q['drc'].split()[0]
508 if origin == 'ubuntu':
509 if q.has_key('popcon'):
510 db[p]['main']['ubuntu_popcon'] = q['popcon']
511 # if we have ubuntu, need to get debian
512 q = dde_get(query_url + "/packages/prio-debian-sid/%s" % p)
513 if q and q.has_key('popcon'):
514 db[p]['main']['debian_popcon'] = q['popcon']
515 elif origin == 'debian':
516 if q.has_key('popcon'):
517 db[p]['main']['debian_popcon'] = q['popcon']
518 # if we have debian, need to get ubuntu
519 q = dde_get(query_url + "/packages/prio-ubuntu-precise/%s" % p)
520 if q and q.has_key('popcon'):
521 db[p]['main']['ubuntu_popcon'] = q['popcon']
523 print("Ignoring unkown origin '%s' for package '%s'." \
526 # now get info for package from all releases in UDD
527 q = dde_get(query_url + "/dist/p:%s" % p)
530 # hold all info about this package per distribution release
533 distkey = (trans_codename(cp['release'], cfg),
534 "%s-%s" % (cp['distribution'], cp['release']))
535 if not info.has_key(distkey):
537 # turn into a list to append others later
538 info[distkey]['architecture'] = [info[distkey]['architecture']]
539 # accumulate data for multiple over archs
541 comp = version_compare(cp['version'],
542 info[distkey]['version'])
543 # found another arch for the same version
545 info[distkey]['architecture'].append(cp['architecture'])
546 # found newer version, dump the old ones
549 # turn into a list to append others later
550 info[distkey]['architecture'] = [info[distkey]['architecture']]
551 # simply ignore older versions
555 # finally assign the new package data
556 for k, v in info.iteritems():
# Coerce any input to a unicode string, decoding UTF-8 byte strings when
# a plain unicode() conversion fails.
561 def assure_unicode(s):
562 """Assure that argument is unicode
564 Necessary if strings are not carrying out Pythonish 'u' prefix to
565 signal UTF8 strings, but are in fact UTF8
567 if type(s) is unicode:
570 # attempt regular unicode call and if fails -- just decode it
574 except UnicodeDecodeError, e:
575 return s.decode('utf8')
# non-string input: stringify first, then recurse to normalize
577 return assure_unicode(str(s))
# Convert a Debian-control-style long description into reStructuredText:
# unwrap hard-wrapped lines, group them into (possibly nested) blocks, and
# re-emit with reST paragraph/itemization spacing.  The logic is intricate
# and partially elided in this view -- code intentionally left untouched.
580 def convert_longdescr(ld):
583 yoh: I think all this long description conversion will keep giving
584 us problems since per se there is no strict regulations,
585 especially in blends files
# escape lone '%' and normalize literal tab sequences before parsing
588 ld = ld.replace('% ', '%% ')
589 ld = ld.replace(r'\t', ' ') # just in case assuming tab 4
591 re_leadblanks = re.compile("^ *")
592 re_itemized = re.compile("^[o*-+] +")
593 re_itemized_gr = re.compile("^( *)([-o*+] +)?(.*?)$")
594 re_description_gr = re.compile("^( *[^-]+ - )(.*?)$")
# join lines that belong to the same logical paragraph/item
596 def unwrap_lines(lines):
600 match = re_itemized_gr.search(l).groups()
601 if ((len(match[0]) in indent_levels and match[1] is None)
602 or (len(match[0]) > max(indent_levels)+4)) \
603 and match[2].strip() != '.':
605 if not out[-1].endswith(" "):
611 indent_levels = [len(match[0])]
612 if match[1] is not None:
613 indent_levels += [len(match[0]) + len(match[1])]
614 if match[2].strip() == '.':
615 # reset though if '.'
619 def dedent_withlevel(lines):
620 """Dedent `lines` given in a list provide dedented lines and how much was dedented
622 nleading = min([re_leadblanks.search(l).span()[1]
624 return [l[nleading:] for l in lines], nleading
# recursively split lines into nested blocks/paragraphs
626 def block_lines(ld, level=0):
627 # so we got list of lines
628 # dedent all of them first
629 ld, level = dedent_withlevel(ld)
631 # let's collect them in blocks/paragraphs
632 # 1. into paragraphs split by '.'
633 blocks, block = [], None
635 # next block can begin if
637 # 2. it was an itemized list and all items begin with
638 # the same symbol or get further indented accordingly
639 # so let's first check if it is an itemized list
640 itemized_match = re_itemized.search(ld[0])
642 allow_indents = " "*itemized_match.span()[1]
646 if block is None or l.strip() == '.' \
647 or (len(l) and ( len(block) and (
648 (l.startswith(' ') and not block[-1].startswith(' '))
650 (not l.startswith(' ') and block[-1].startswith(' ')))))
658 return [block_lines(b, level+1) for b in blocks if len(b)]
# serialize the nested block structure into reST text
660 def blocks_to_rst(bls, level=0):
661 # check if this block is an itemized beast
662 #itemized_match = re_itemized_gr.search(bls[0][0])
664 # res += ' 'allow_indents = " "*itemized_match.span()[1]
667 if isinstance(b, list):
669 out += " "*level + b[0] + '\n\n'
671 out += blocks_to_rst(b, level+1)
673 e = " "*level + b + '\n'
674 if not re_itemized.search(b):
677 elif len(e) and e[0] == ' ':
678 # strip 1 leading blank
684 ld = unwrap_lines(ld)
685 bls = block_lines(ld)
686 return blocks_to_rst(bls)
def underline_text(text, symbol):
    """Return `text` followed by a full-width underline made of `symbol`
    characters (reST-style heading), each on its own line."""
    return '%s\n%s\n' % (text, symbol * len(text))
# Render the reST page for one package from the jinja template; bails out
# early (per the comment at 698) when no basic description is known, and
# appends an include directive for a per-package addendum file if present.
# NOTE(review): several lines (template kwargs, returns) are elided.
694 def generate_pkgpage(pkg, cfg, db, template, addenum_dir, extracts_dir):
695 # local binding for ease of use
697 # do nothing if there is not at least the very basic stuff
698 if not pkgdb['main'].has_key('description'):
700 title = '**%s** -- %s' % (pkg, pkgdb['main']['description'])
701 title = underline_text(title, '*')
704 if 'sv' in pkgdb['main']:
705 ex_dir = os.path.join(extracts_dir, pkgdb['main']['sv'].split()[0])
706 if not os.path.exists(ex_dir):
708 page = template.render(
711 long_description=convert_longdescr(
712 assure_unicode(pkgdb['main']['long_description'])),
718 # the following can be replaced by something like
719 # {% include "sidebar.html" ignore missing %}
720 # in the template whenever jinja 2.2 becomes available
721 addenum = os.path.join(os.path.abspath(addenum_dir), '%s.rst' % pkg)
722 if os.path.exists(addenum):
723 page += '\n\n.. include:: %s\n' % addenum
def store_db(db, filename):
    """Serialize the package database `db` into `filename`.

    The db is written as a pretty-printed Python literal, UTF-8 encoded,
    so it can be read back later (see `read_db`).
    """
    pp = PrettyPrinter(indent=2)
    # context manager guarantees the handle is closed even if pformat/write
    # raises; `f` stays bound so a subsequent close() remains a no-op
    with codecs.open(filename, 'w', 'utf-8') as f:
        f.write(pp.pformat(db))
# Load a previously stored package database from `filename` (UTF-8 text);
# remainder of the function is elided in this view.
734 def read_db(filename):
735 f = codecs.open(filename, 'r', 'utf-8')
# Write APT sources.list snippets for every release/mirror combination into
# <outdir>/lists, plus the sources_lists overview page rendered from a
# jinja template.  NOTE(review): some lines are elided in this view.
739 def write_sourceslist(jinja_env, cfg, outdir):
741 create_dir(os.path.join(outdir, 'lists'))
744 for release in cfg.options('release codenames'):
745 if release == 'data':
746 # no separate list for the data archive
748 transrel = trans_codename(release, cfg)
750 for mirror in cfg.options('mirrors'):
751 listname = '%s.%s' % (release, mirror)
752 repos[transrel].append((mirror, listname))
753 lf = open(os.path.join(outdir, 'lists', listname), 'w')
# each list carries both the 'data' archive and the release itself
754 for rel in ('data', release):
755 aptcfg = '%s %s main contrib non-free\n' % (cfg.get('mirrors', mirror),
757 lf.write('deb %s' % aptcfg)
758 lf.write('#deb-src %s' % aptcfg)
761 id2codename = dict([(cfg.get('release backport ids', r), r)
762 for r in cfg.options('release codenames')])
763 id2relname = dict([(cfg.get('release backport ids', r), trans_codename(r, cfg))
764 for r in cfg.options('release codenames')])
765 mirror2name = dict([(m, cfg.get('mirror names', m))
766 for m in cfg.options('mirrors')])
767 mirror2url = dict([(m, cfg.get('mirrors', m))
768 for m in cfg.options('mirrors')])
769 srclist_template = jinja_env.get_template('sources_lists.rst')
770 sl = open(os.path.join(outdir, 'sources_lists'), 'w')
771 sl.write(srclist_template.render(id2codename=id2codename,
772 id2relname=id2relname,
773 mirror2name=mirror2name,
774 mirror2url=mirror2url))
# Group package names by blends task; skips packages without blends/task
# info.  Return statement elided in this view.
778 def sort_by_tasks(db):
780 for pkg in db.keys():
781 if not 'blends' in db[pkg]:
784 blendinfo = db[pkg]['blends']
785 if not 'tasks' in blendinfo:
786 # no task info in blend data
788 taskinfo = blendinfo['tasks']
789 for task in taskinfo:
791 if not taskname in tasks:
794 tasks[taskname].append(pkg)
# Group package names by maintainer, keyed case-insensitively but reported
# under the first-seen spelling; blends 'responsible' info takes priority,
# with original_maintainer preferred over maintainer from 'main'.
# NOTE(review): several lines are elided in this view.
798 def sort_by_maintainer(db):
801 for pkg in db.keys():
804 # start with the blends info
805 if 'blends' in pkginfo and 'responsible' in pkginfo['blends']:
806 maint = pkginfo['blends']['responsible']
807 if not 'main' in db[pkg] and maint is None:
810 info = db[pkg]['main']
811 if not 'maintainer' in info and maint is None:
814 if 'original_maintainer' in info and not info['original_maintainer'] is None:
815 maint = info['original_maintainer']
816 elif 'maintainer' in info and not info['maintainer'] is None:
817 maint = info['maintainer']
819 # no sane maintainer info
821 # safeguard: <> confuses sphinx and we don't care about different emails
822 maint = maint[:maint.find('<')].strip()
823 # kick out non-ascii ones (should not be, but too tired to find the bug)
825 codecs.ascii_decode(maint)
826 except UnicodeEncodeError:
828 if not maint.lower() in maints:
829 maints[maint.lower()] = []
830 maint_ids[maint.lower()] = [maint]
832 maint_ids[maint.lower()].append(maint)
833 maints[maint.lower()].append(pkg)
# deduplicate package lists per maintainer
837 out[maint_ids[m][0]] = np.unique(maints[m])
# Group package names by distribution release (only tuple-keyed sections of
# a package's db entry are release entries); lists are deduplicated.
841 def sort_by_release(db):
843 for pkg in db.keys():
846 if not isinstance(sec, tuple):
847 # only releases are of interest
850 if not relname in rels:
853 rels[relname].append(pkg)
856 rels[r] = np.unique(rels[r])
# Emit the full reST page set: per-category package TOCs (by task, release,
# maintainer), the high-level pkgs.rst overview, a complete package list,
# and one page per package.  NOTE(review): several lines are elided.
860 def write_pkgpages(jinja_env, cfg, db, outdir, addenum_dir, extracts_dir):
862 create_dir(os.path.join(outdir, 'pkgs'))
863 create_dir(os.path.join(outdir, 'pkglists'))
864 # template for individual package listings
865 toc_template = jinja_env.get_template('pkgs_toc.rst')
866 # the high-level package list overview
867 hltoc = codecs.open(os.path.join(outdir, 'pkgs.rst'), 'w', 'utf-8')
868 hltoc.write('.. _pkglists:\n\n')
869 hltoc.write(underline_text('Software packages', '='))
870 defs = [(sort_by_tasks(db), 'By purpose', 'Packages for %s'),
871 (sort_by_release(db), 'By release', 'Packages for %s'),
872 (sort_by_maintainer(db), 'By maintainer', 'Packages by %s')]
874 # TOC for each thingie
875 pkgsdict, sectitle, title_tmpl = def_
876 hltoc.write(underline_text(sectitle, '-'))
877 ids = pkgsdict.keys()
# sanitize the section/id combination into a usable reST label
880 label = ('pkgs-%s-%s' % (sectitle, id_)).lower().replace(' ', '_').replace('/', '_')
882 filtered_pkgs = [p for p in pkgsdict[id_] if p in db]
883 if not len(filtered_pkgs):
885 plist = toc_template.render(
887 title=underline_text(title_tmpl % id_, '='),
890 toc = codecs.open(os.path.join(outdir,
896 hltoc.write('* :ref:`%s`\n' % label)
900 # now a complete list of all packages
901 hltoc.write(underline_text('Complete list', '-'))
902 toc = codecs.open(os.path.join(outdir, 'pkglists', 'pkgs-all.rst'),
904 toc.write(toc_template.render(label='full_pkg_list',
905 title=underline_text('Complete package list', '='),
906 pkgs=db.keys(), db=db))
908 hltoc.write('* :ref:`full_pkg_list`\n')
911 # and now each individual package page
912 pkg_template = jinja_env.get_template('pkg.rst')
914 page = generate_pkgpage(p, cfg, db, pkg_template, addenum_dir, extracts_dir)
915 # when no page is available skip this package
918 pf = codecs.open(os.path.join(outdir, 'pkgs', p + '.rst'), 'w', 'utf-8')
# Register all command line options of this script on the given
# OptionParser instance `op`.
923 def prepOptParser(op):
924 # use module docstring for help output
925 op.usage = "%s [OPTIONS]\n\n" % sys.argv[0] + __doc__
927 op.add_option("--db",
928 action="store", type="string", dest="db",
930 help="Database file to read. Default: None")
932 op.add_option("--cfg",
933 action="store", type="string", dest="cfg",
935 help="Repository config file.")
937 op.add_option("-o", "--outdir",
938 action="store", type="string", dest="outdir",
940 help="Target directory for ReST output. Default: None")
942 op.add_option("-r", "--release-url",
943 action="append", dest="release_urls",
946 op.add_option("--pkgaddenum", action="store", dest="addenum_dir",
947 type="string", default=None, help="None")
949 op.add_option("--extracts", action="store", dest="extracts_dir",
950 type="string", default=None, help="None")
# Script driver: parse options, validate mandatory --cfg/--db, load config,
# then dispatch on the command (debug_ld, updatedb, page generation).
# NOTE(review): the enclosing main() def and several lines are elided.
954 op = OptionParser(version="%prog 0.0.2")
957 (opts, args) = op.parse_args()
960 print('There needs to be exactly one command')
966 print("'--cfg' option is mandatory.")
969 print("'--db' option is mandatory.")
973 cfg = SafeConfigParser()
976 if cmd == 'debug_ld':
977 # load the db from file
978 db = read_db(opts.db)
981 #for p in ['dtitk', 'psychopy', 'psytoolkit', 'ginkgo-cadx', 'gridengine-master', 'cctools']:
982 if not 'long_description' in db[p]['main']:
984 ld = db[p]['main']['long_description']
986 print ">>>>>>>>> ", p
989 print convert_longdescr(ld)
991 # load existing db, unless renew is requested
992 if cmd == 'updatedb':
994 if cfg.has_option('packages', 'select taskfiles'):
995 db = add_pkgfromtaskfile(db, cfg.get('packages',
996 'select taskfiles').split())
998 # add additional package names from config file
999 if cfg.has_option('packages', 'select names'):
1000 for p in cfg.get('packages', 'select names').split():
1001 if not db.has_key(p):
1002 db[p] = get_emptydbentry()
1004 # get info from task files
1005 if cfg.has_option('packages', 'prospective'):
1006 for url in cfg.get('packages', 'prospective').split():
1007 db = import_blendstask(cfg, db, url)
1009 # parse NeuroDebian repository
1010 if cfg.has_option('neurodebian', 'releases'):
1011 for rurl in cfg.get('neurodebian', 'releases').split():
1012 db = import_release(cfg, db, rurl)
1014 # collect package information from DDE
1015 db = import_dde(cfg, db)
1016 # get info from NITRC
1017 db = import_nitrc(cfg, db)
# persist the updated database
1019 store_db(db, opts.db)
1023 # load the db from file
1024 db = read_db(opts.db)
1027 jinja_env = Environment(loader=PackageLoader('neurodebian', 'templates'))
1029 # generate package pages and TOC and write them to files
1030 write_pkgpages(jinja_env, cfg, db, opts.outdir, opts.addenum_dir, opts.extracts_dir)
1032 write_sourceslist(jinja_env, cfg, opts.outdir)
1034 if __name__ == "__main__":