if changes["source"] == "dpkg":
fpr = changes["fingerprint"]
- (uid, uid_name) = dak_module.lookup_uid_from_fingerprint(fpr)
+ (uid, uid_name, is_dm) = dak_module.lookup_uid_from_fingerprint(fpr)
if fpr == "5906F687BD03ACAD0D8E602EFCF37657" or uid == "iwj":
reject("Upload blocked due to hijack attempt 2008/03/19")
filename = "%s/dists/%s/%s/source/Sources.gz" % (Cnf["Dir::Root"], suite, component)
print "Processing %s..." % (filename)
# apt_pkg.ParseTagFile needs a real file handle and can't handle a GzipFile instance...
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
(result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
if (result != 0):
sys.stderr.write("Gunzip invocation failed!\n%s\n" % (output))
% (Cnf["Dir::Root"], suite, component, architecture)
print "Processing %s..." % (filename)
# apt_pkg.ParseTagFile needs a real file handle and can't handle a GzipFile instance...
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
(result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
if (result != 0):
sys.stderr.write("Gunzip invocation failed!\n%s\n" % (output))
FROM src_associations sa, source s, source s2, src_associations sa2
WHERE sa.suite = %s AND sa2.suite = %d AND sa.source = s.id
AND sa2.source = s2.id AND s.source = s2.source
- AND versioncmp(s.version, s2.version) < 0""" % (experimental_id,
- database.get_suite_id("unstable")))
+ AND s.version < s2.version""" % (experimental_id,
+ database.get_suite_id("unstable")))
ql = q.getresult()
if ql:
nviu_to_remove = []
for component in components:
filename = "%s/dists/%s/%s/source/Sources.gz" % (Cnf["Dir::Root"], suite, component)
# apt_pkg.ParseTagFile needs a real file handle and can't handle a GzipFile instance...
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
(result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
if (result != 0):
sys.stderr.write("Gunzip invocation failed!\n%s\n" % (output))
for architecture in architectures:
filename = "%s/dists/%s/%s/binary-%s/Packages.gz" % (Cnf["Dir::Root"], suite, component, architecture)
# apt_pkg.ParseTagFile needs a real file handle
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
(result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
if (result != 0):
sys.stderr.write("Gunzip invocation failed!\n%s\n" % (output))
"Generates Maintainers file for BTS etc"),
("make-overrides",
"Generates override files"),
- ("mirror-split",
- "Split the pool/ by architecture groups"),
("poolize",
"Move packages from dists/ to pool/"),
("reject-proposed-updates",
--- /dev/null
+#!/usr/bin/env python
+# coding=utf8
+
+# Debian Archive Kit Database Update Script
+# Copyright © 2008 Michael Casadevall <mcasadevall@debian.org>
+# Copyright © 2008 Roger Leigh <rleigh@debian.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import psycopg2, time
+
+################################################################################
+
+def do_update(self):
+    """Apply archive database update 2: introduce the 'debversion' type.
+
+    Enables the PL/Perl and PL/pgSQL procedural languages, creates a
+    debversion TEXT domain with a syntax constraint, installs split and
+    comparison helper functions ported from Dpkg::Version, defines the
+    comparison operators (=, !=, <, >, <=, >=) on debversion, converts
+    the version columns of the source and binaries tables to the new
+    type, and bumps db_revision to 2.  All changes are rolled back if a
+    ProgrammingError occurs.  Requires database superuser rights.
+    """
+    print "Note: to be able to enable the PL/Perl (plperl) procedural language, we do"
+    print "need postgresql-plperl-$postgres-version installed. Make sure that this is the"
+    print "case before you continue. Interrupt if it isn't, sleeping 5 seconds now."
+    print "(We need to be database superuser for this to work!)"
+    time.sleep (5)
+
+    try:
+        c = self.db.cursor()
+
+        print "Enabling PL/Perl language"
+        c.execute("CREATE LANGUAGE plperl;")
+        c.execute("CREATE LANGUAGE plpgsql;")
+
+        print "Adding debversion type to database."
+
+# Not present in all databases, maybe PL/Perl version-dependent?
+#        c.execute("SET SESSION plperl.use_strict TO 't';")
+
+        c.execute("CREATE DOMAIN debversion AS TEXT;")
+        c.execute("COMMENT ON DOMAIN debversion IS 'Debian package version number';")
+
+        # Restrict the domain to the characters a Debian version may contain.
+        c.execute("""ALTER DOMAIN debversion
+  ADD CONSTRAINT debversion_syntax
+  CHECK (VALUE !~ '[^-+:.0-9a-zA-Z~]');""")
+
+        # From Dpkg::Version::parseversion
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_split (debversion)
+  RETURNS text[] AS $$
+  my $ver = shift;
+  my %verhash;
+  if ($ver =~ /:/)
+  {
+    $ver =~ /^(\d+):(.+)/ or die "bad version number '$ver'";
+    $verhash{epoch} = $1;
+    $ver = $2;
+  }
+  else
+  {
+    $verhash{epoch} = 0;
+  }
+  if ($ver =~ /(.+)-(.*)$/)
+  {
+    $verhash{version} = $1;
+    $verhash{revision} = $2;
+  }
+  else
+  {
+    $verhash{version} = $ver;
+    $verhash{revision} = 0;
+  }
+
+  return [$verhash{'epoch'}, $verhash{'version'}, $verhash{'revision'}];
+$$
+  LANGUAGE plperl
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_split (debversion)
+  IS 'Split debian version into epoch, upstream version and revision';""")
+
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_epoch (version debversion)
+  RETURNS text AS $$
+DECLARE
+  split text[];
+BEGIN
+  split := debversion_split(version);
+  RETURN split[1];
+END;
+$$
+  LANGUAGE plpgsql
+  IMMUTABLE STRICT;
+COMMENT ON FUNCTION debversion_epoch (debversion)
+  IS 'Get debian version epoch';
+
+CREATE OR REPLACE FUNCTION debversion_version (version debversion)
+  RETURNS text AS $$
+DECLARE
+  split text[];
+BEGIN
+  split := debversion_split(version);
+  RETURN split[2];
+END;
+$$
+  LANGUAGE plpgsql
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_version (debversion)
+  IS 'Get debian version upstream version';""")
+
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_revision (version debversion)
+  RETURNS text AS $$
+DECLARE
+  split text[];
+BEGIN
+  split := debversion_split(version);
+  RETURN split[3];
+END;
+$$
+  LANGUAGE plpgsql
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_revision (debversion)
+  IS 'Get debian version revision';""")
+
+# From Dpkg::Version::parseversion
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_compare_single (version1 text, version2 text)
+  RETURNS integer AS $$
+    sub order{
+        my ($x) = @_;
+        ##define order(x) ((x) == '~' ? -1 \
+        #    : cisdigit((x)) ? 0 \
+        #    : !(x) ? 0 \
+        #    : cisalpha((x)) ? (x) \
+        #    : (x) + 256)
+        # This comparison is out of dpkg's order to avoid
+        # comparing things to undef and triggering warnings.
+        if (not defined $x or not length $x) {
+            return 0;
+        }
+        elsif ($x eq '~') {
+            return -1;
+        }
+        elsif ($x =~ /^\d$/) {
+            return 0;
+        }
+        elsif ($x =~ /^[A-Z]$/i) {
+            return ord($x);
+        }
+        else {
+            return ord($x) + 256;
+        }
+    }
+
+    sub next_elem(\@){
+        my $a = shift;
+        return @{$a} ? shift @{$a} : undef;
+    }
+    my ($val, $ref) = @_;
+    $val = "" if not defined $val;
+    $ref = "" if not defined $ref;
+    my @val = split //,$val;
+    my @ref = split //,$ref;
+    my $vc = next_elem @val;
+    my $rc = next_elem @ref;
+    while (defined $vc or defined $rc) {
+        my $first_diff = 0;
+        while ((defined $vc and $vc !~ /^\d$/) or
+               (defined $rc and $rc !~ /^\d$/)) {
+            my $vo = order($vc); my $ro = order($rc);
+            # Unlike dpkg's verrevcmp, we only return 1 or -1 here.
+            return (($vo - $ro > 0) ? 1 : -1) if $vo != $ro;
+            $vc = next_elem @val; $rc = next_elem @ref;
+        }
+        while (defined $vc and $vc eq '0') {
+            $vc = next_elem @val;
+        }
+        while (defined $rc and $rc eq '0') {
+            $rc = next_elem @ref;
+        }
+        while (defined $vc and $vc =~ /^\d$/ and
+               defined $rc and $rc =~ /^\d$/) {
+            $first_diff = ord($vc) - ord($rc) if !$first_diff;
+            $vc = next_elem @val; $rc = next_elem @ref;
+        }
+        return 1 if defined $vc and $vc =~ /^\d$/;
+        return -1 if defined $rc and $rc =~ /^\d$/;
+        return (($first_diff > 0) ? 1 : -1) if $first_diff;
+    }
+    return 0;
+$$
+  LANGUAGE plperl
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_compare_single (text, text)
+  IS 'Compare upstream or revision parts of Debian versions';""")
+
+# Logic only derived from Dpkg::Version::parseversion
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_compare (version1 debversion, version2 debversion)
+  RETURNS integer AS $$
+DECLARE
+  split1 text[];
+  split2 text[];
+  result integer;
+BEGIN
+  result := 0;
+  split1 := debversion_split(version1);
+  split2 := debversion_split(version2);
+
+  -- RAISE NOTICE 'Version 1: %', version1;
+  -- RAISE NOTICE 'Version 2: %', version2;
+  -- RAISE NOTICE 'Split 1: %', split1;
+  -- RAISE NOTICE 'Split 2: %', split2;
+
+  -- Epochs are numeric: compare as integers, since a text comparison
+  -- would sort epoch '10' before epoch '9'.
+  IF split1[1]::integer > split2[1]::integer THEN
+    result := 1;
+  ELSIF split1[1]::integer < split2[1]::integer THEN
+    result := -1;
+  ELSE
+    result := debversion_compare_single(split1[2], split2[2]);
+    IF result = 0 THEN
+      result := debversion_compare_single(split1[3], split2[3]);
+    END IF;
+  END IF;
+
+  RETURN result;
+END;
+$$
+  LANGUAGE plpgsql
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_compare (debversion, debversion)
+  IS 'Compare Debian versions';""")
+
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_eq (version1 debversion, version2 debversion)
+  RETURNS boolean AS $$
+DECLARE
+  comp integer;
+  result boolean;
+BEGIN
+  comp := debversion_compare(version1, version2);
+  result := comp = 0;
+  RETURN result;
+END;
+$$
+  LANGUAGE plpgsql
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_eq (debversion, debversion)
+  IS 'debversion equal';""")
+
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_ne (version1 debversion, version2 debversion)
+  RETURNS boolean AS $$
+DECLARE
+  comp integer;
+  result boolean;
+BEGIN
+  comp := debversion_compare(version1, version2);
+  result := comp <> 0;
+  RETURN result;
+END;
+$$
+  LANGUAGE plpgsql
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_ne (debversion, debversion)
+  IS 'debversion not equal';""")
+
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_lt (version1 debversion, version2 debversion)
+  RETURNS boolean AS $$
+DECLARE
+  comp integer;
+  result boolean;
+BEGIN
+  comp := debversion_compare(version1, version2);
+  result := comp < 0;
+  RETURN result;
+END;
+$$
+  LANGUAGE plpgsql
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_lt (debversion, debversion)
+  IS 'debversion less-than';""")
+
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_gt (version1 debversion, version2 debversion) RETURNS boolean AS $$
+DECLARE
+  comp integer;
+  result boolean;
+BEGIN
+  comp := debversion_compare(version1, version2);
+  result := comp > 0;
+  RETURN result;
+END;
+$$
+  LANGUAGE plpgsql
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_gt (debversion, debversion)
+  IS 'debversion greater-than';""")
+
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_le (version1 debversion, version2 debversion)
+  RETURNS boolean AS $$
+DECLARE
+  comp integer;
+  result boolean;
+BEGIN
+  comp := debversion_compare(version1, version2);
+  result := comp <= 0;
+  RETURN result;
+END;
+$$
+  LANGUAGE plpgsql
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_le (debversion, debversion)
+  IS 'debversion less-than-or-equal';""")
+
+        c.execute("""CREATE OR REPLACE FUNCTION debversion_ge (version1 debversion, version2 debversion)
+  RETURNS boolean AS $$
+DECLARE
+  comp integer;
+  result boolean;
+BEGIN
+  comp := debversion_compare(version1, version2);
+  result := comp >= 0;
+  RETURN result;
+END;
+$$
+  LANGUAGE plpgsql
+  IMMUTABLE STRICT;""")
+        c.execute("""COMMENT ON FUNCTION debversion_ge (debversion, debversion)
+  IS 'debversion greater-than-or-equal';""")
+
+        c.execute("""CREATE OPERATOR = (
+  PROCEDURE = debversion_eq,
+  LEFTARG = debversion,
+  RIGHTARG = debversion,
+  COMMUTATOR = =,
+  NEGATOR = !=);""")
+        c.execute("""COMMENT ON OPERATOR = (debversion, debversion)
+  IS 'debversion equal';""")
+
+        # Bugfix: != must be backed by debversion_ne; binding it to
+        # debversion_eq would make a != b behave exactly like a = b.
+        c.execute("""CREATE OPERATOR != (
+  PROCEDURE = debversion_ne,
+  LEFTARG = debversion,
+  RIGHTARG = debversion,
+  COMMUTATOR = !=,
+  NEGATOR = =);""")
+        c.execute("""COMMENT ON OPERATOR != (debversion, debversion)
+  IS 'debversion not equal';""")
+
+        c.execute("""CREATE OPERATOR < (
+  PROCEDURE = debversion_lt,
+  LEFTARG = debversion,
+  RIGHTARG = debversion,
+  COMMUTATOR = >,
+  NEGATOR = >=);""")
+        c.execute("""COMMENT ON OPERATOR < (debversion, debversion)
+  IS 'debversion less-than';""")
+
+        # Bugfix: the negator of > is <= (previously declared as >=, which
+        # would let the planner rewrite NOT (a > b) incorrectly).
+        c.execute("""CREATE OPERATOR > (
+  PROCEDURE = debversion_gt,
+  LEFTARG = debversion,
+  RIGHTARG = debversion,
+  COMMUTATOR = <,
+  NEGATOR = <=);""")
+        c.execute("""COMMENT ON OPERATOR > (debversion, debversion)
+  IS 'debversion greater-than';""")
+
+        c.execute("""CREATE OPERATOR <= (
+  PROCEDURE = debversion_le,
+  LEFTARG = debversion,
+  RIGHTARG = debversion,
+  COMMUTATOR = >=,
+  NEGATOR = >);""")
+        c.execute("""COMMENT ON OPERATOR <= (debversion, debversion)
+  IS 'debversion less-than-or-equal';""")
+
+        c.execute("""CREATE OPERATOR >= (
+  PROCEDURE = debversion_ge,
+  LEFTARG = debversion,
+  RIGHTARG = debversion,
+  COMMUTATOR = <=,
+  NEGATOR = <);""")
+        c.execute("""COMMENT ON OPERATOR >= (debversion, debversion)
+  IS 'debversion greater-than-or-equal';""")
+
+        # Switch the archive's version columns over to the new domain.
+        c.execute("ALTER TABLE source ALTER COLUMN version TYPE debversion;")
+        c.execute("ALTER TABLE binaries ALTER COLUMN version TYPE debversion;")
+
+        c.execute("UPDATE config SET value = '2' WHERE name = 'db_revision'")
+
+        self.db.commit()
+
+    except psycopg2.ProgrammingError, msg:
+        self.db.rollback()
+        print "FATAL: Unable to apply debversion table update 2!"
+        print "Error Message: " + str(msg)
+        print "Database changes have been rolled back."
--- /dev/null
+#!/usr/bin/env python
+
+# Debian Archive Kit Database Update Script
+# Copyright (C) 2008 Michael Casadevall <mcasadevall@debian.org>
+# Copyright (C) 2009 Joerg Jaspert <joerg@debian.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+################################################################################
+
+import psycopg2, time
+
+################################################################################
+
+def do_update(self):
+    """Apply archive database update 3.
+
+    Drops the obsolete versioncmp(text, text) SQL function (superseded
+    by the debversion comparison operators) and bumps db_revision to 3.
+    Rolls the transaction back if a ProgrammingError occurs.
+    """
+    print "Removing no longer used function versioncmp"
+
+    try:
+        c = self.db.cursor()
+        c.execute("DROP FUNCTION versioncmp(text, text);")
+        c.execute("UPDATE config SET value = '3' WHERE name = 'db_revision'")
+
+        self.db.commit()
+
+    except psycopg2.ProgrammingError, msg:
+        self.db.rollback()
+        print "FATAL: Unable to apply db update 3!"
+        print "Error Message: " + str(msg)
+        print "Database changes have been rolled back."
return (control, control_keys, section, depends, recommends, arch, maintainer)
-def read_changes_or_dsc (filename):
+def read_changes_or_dsc (suite, filename):
dsc = {}
dsc_file = utils.open_file(filename)
for k in dsc.keys():
if k in ("build-depends","build-depends-indep"):
- dsc[k] = create_depends_string(split_depends(dsc[k]))
+ dsc[k] = create_depends_string(suite, split_depends(dsc[k]))
elif k == "architecture":
if (dsc["architecture"] != "any"):
dsc['architecture'] = colour_output(dsc["architecture"], 'arch')
filecontents = '\n'.join(map(lambda x: format_field(x,dsc[x.lower()]), keysinorder))+'\n'
return filecontents
-def create_depends_string (depends_tree):
- # just look up unstable for now. possibly pull from .changes later
- suite = "unstable"
+def create_depends_string (suite, depends_tree):
result = ""
+ if suite == 'experimental':
+ suite_where = " in ('experimental','unstable')"
+ else:
+ suite_where = " ='%s'" % suite
+
comma_count = 1
for l in depends_tree:
if (comma_count >= 2):
result += " | "
# doesn't do version lookup yet.
- q = projectB.query("SELECT DISTINCT(b.package), b.version, c.name, su.suite_name FROM binaries b, files fi, location l, component c, bin_associations ba, suite su WHERE b.package='%s' AND b.file = fi.id AND fi.location = l.id AND l.component = c.id AND ba.bin=b.id AND ba.suite = su.id AND su.suite_name='%s' ORDER BY b.version desc" % (d['name'], suite))
+ q = projectB.query("SELECT DISTINCT(b.package), b.version, c.name, su.suite_name FROM binaries b, files fi, location l, component c, bin_associations ba, suite su WHERE b.package='%s' AND b.file = fi.id AND fi.location = l.id AND l.component = c.id AND ba.bin=b.id AND ba.suite = su.id AND su.suite_name %s ORDER BY b.version desc" % (d['name'], suite_where))
ql = q.getresult()
if ql:
i = ql[0]
comma_count += 1
return result
-def output_deb_info(filename):
+def output_deb_info(suite, filename):
(control, control_keys, section, depends, recommends, arch, maintainer) = read_control(filename)
if control == '':
to_print = ""
for key in control_keys :
if key == 'Depends':
- field_value = create_depends_string(depends)
+ field_value = create_depends_string(suite, depends)
elif key == 'Recommends':
- field_value = create_depends_string(recommends)
+ field_value = create_depends_string(suite, recommends)
elif key == 'Section':
field_value = section
elif key == 'Architecture':
printed_copyrights[copyrightmd5] = "%s (%s)" % (package, deb_filename)
return res+formatted_text(cright)
-def check_dsc (dsc_filename):
- (dsc) = read_changes_or_dsc(dsc_filename)
+def check_dsc (suite, dsc_filename):
+ (dsc) = read_changes_or_dsc(suite, dsc_filename)
foldable_output(dsc_filename, "dsc", dsc, norow=True)
foldable_output("lintian check for %s" % dsc_filename, "source-lintian", do_lintian(dsc_filename))
-def check_deb (deb_filename):
+def check_deb (suite, deb_filename):
filename = os.path.basename(deb_filename)
packagename = filename.split('_')[0]
foldable_output("control file for %s" % (filename), "binary-%s-control"%packagename,
- output_deb_info(deb_filename), norow=True)
+ output_deb_info(suite, deb_filename), norow=True)
if is_a_udeb:
foldable_output("skipping lintian check for udeb", "binary-%s-lintian"%packagename,
# Read a file, strip the signature and return the modified contents as
# a string.
def strip_pgp_signature (filename):
- file = utils.open_file (filename)
+ inputfile = utils.open_file (filename)
contents = ""
inside_signature = 0
skip_next = 0
- for line in file.readlines():
+ for line in inputfile.readlines():
if line[:-1] == "":
continue
if inside_signature:
inside_signature = 0
continue
contents += line
- file.close()
+ inputfile.close()
return contents
-def display_changes(changes_filename):
- changes = read_changes_or_dsc(changes_filename)
+def display_changes(suite, changes_filename):
+ changes = read_changes_or_dsc(suite, changes_filename)
foldable_output(changes_filename, "changes", changes, norow=True)
def check_changes (changes_filename):
- display_changes(changes_filename)
-
changes = utils.parse_changes (changes_filename)
+ display_changes(changes['distribution'], changes_filename)
+
files = utils.build_file_list(changes)
for f in files.keys():
if f.endswith(".deb") or f.endswith(".udeb"):
- check_deb(f)
+ check_deb(changes['distribution'], f)
if f.endswith(".dsc"):
- check_dsc(f)
+ check_dsc(changes['distribution'], f)
# else: => byhand
def main ():
if f.endswith(".changes"):
check_changes(f)
elif f.endswith(".deb") or f.endswith(".udeb"):
- check_deb(file)
+ # default to unstable when we don't have a .changes file
+ # perhaps this should be a command line option?
+ check_deb('unstable', file)
elif f.endswith(".dsc"):
- check_dsc(f)
+ check_dsc('unstable', f)
else:
utils.fubar("Unrecognised file type: '%s'." % (f))
finally:
################################################################################
-import sys, os, tempfile
+import sys
+import os
+import tempfile
+import subprocess
+import time
import apt_pkg
from daklib import utils
if upd.filesizesha1:
if upd.filesizesha1 != oldsizesha1:
- print "warning: old file seems to have changed! %s %s => %s %s" % (upd.filesizesha1 + oldsizesha1)
-
- # XXX this should be usable now
- #
- #for d in upd.history.keys():
- # df = smartopen("%s/%s" % (outdir,d))
- # act_sha1size = sizesha1(df)
- # df.close()
- # exp_sha1size = upd.history[d][1]
- # if act_sha1size != exp_sha1size:
- # print "patch file %s seems to have changed! %s %s => %s %s" % \
- # (d,) + exp_sha1size + act_sha1size
+ print "info: old file " + oldfile + " changed! %s %s => %s %s" % (upd.filesizesha1 + oldsizesha1)
if Options.has_key("CanonicalPath"): upd.can_path=Options["CanonicalPath"]
oldf.close()
print "%s: unchanged" % (origfile)
else:
- if not os.path.isdir(outdir): os.mkdir(outdir)
- w = os.popen("diff --ed - %s | gzip -c -9 > %s.gz" %
- (newfile, difffile), "w")
+ if not os.path.isdir(outdir):
+ os.mkdir(outdir)
+
+ cmd = "diff --ed - %s | gzip -c -9 > %s.gz" % (newfile, difffile)
+ # Do we need shell=True?
+ w = subprocess.Popen(cmd, shell=True, stdin=PIPE).stdin
+
+    # I bet subprocess can do that better than this, but let's do little steps
pipe_file(oldf, w)
oldf.close()
if not Options.has_key("PatchName"):
format = "%Y-%m-%d-%H%M.%S"
- i,o = os.popen2("date +%s" % (format))
- i.close()
- Options["PatchName"] = o.readline()[:-1]
- o.close()
+ Options["PatchName"] = time.strftime( format )
AptCnf = apt_pkg.newConfiguration()
apt_pkg.ReadConfigFileISC(AptCnf,utils.which_apt_conf_file())
################################################################################
-import sys, os, popen2, tempfile, stat, time, pg
+import sys, os, stat, time, pg
+import gzip, bz2
import apt_pkg
from daklib import utils
from daklib.dak_exceptions import *
result.append(file + ".bz2")
return result
-def create_temp_file (cmd):
- f = tempfile.TemporaryFile()
- r = popen2.popen2(cmd)
- r[1].close()
- r = r[0]
- size = 0
- while 1:
- x = r.readline()
- if not x:
- r.close()
- del x,r
- break
- f.write(x)
- size += len(x)
- f.flush()
- f.seek(0)
- return (size, f)
+decompressors = { 'zcat' : gzip.GzipFile,
+ 'bzip2' : bz2.BZ2File }
def print_md5sha_files (tree, files, hashop):
path = Cnf["Dir::Root"] + tree + "/"
for name in files:
+ hashvalue = ""
+ hashlen = 0
try:
if name[0] == "<":
j = name.index("/")
k = name.index(">")
(cat, ext, name) = (name[1:j], name[j+1:k], name[k+1:])
- (size, file_handle) = create_temp_file("%s %s%s%s" %
- (cat, path, name, ext))
+ file_handle = decompressors[ cat ]( "%s%s%s" % (path, name, ext) )
+ contents = file_handle.read()
+ hashvalue = hashop(contents)
+ hashlen = len(contents)
else:
- size = os.stat(path + name)[stat.ST_SIZE]
- file_handle = utils.open_file(path + name)
+ try:
+ file_handle = utils.open_file(path + name)
+ hashvalue = hashop(file_handle)
+ hashlen = os.stat(path + name).st_size
+ except:
+ raise
+ else:
+ if file_handle:
+ file_handle.close()
+
except CantOpenError:
print "ALERT: Couldn't open " + path + name
else:
- hash = hashop(file_handle)
- file_handle.close()
- out.write(" %s %8d %s\n" % (hash, size, name))
+ out.write(" %s %8d %s\n" % (hashvalue, hashlen, name))
def print_md5_files (tree, files):
print_md5sha_files (tree, files, apt_pkg.md5sum)
for arch in AptCnf["tree::%s::Architectures" % (tree)].split():
if arch == "source":
filepath = "%s/%s/Sources" % (sec, arch)
- for file in compressnames("tree::%s" % (tree), "Sources", filepath):
- files.append(file)
+ for cfile in compressnames("tree::%s" % (tree), "Sources", filepath):
+ files.append(cfile)
add_tiffani(files, Cnf["Dir::Root"] + tree, filepath)
else:
disks = "%s/disks-%s" % (sec, arch)
files.append("%s/%s/md5sum.txt" % (disks, dir))
filepath = "%s/binary-%s/Packages" % (sec, arch)
- for file in compressnames("tree::%s" % (tree), "Packages", filepath):
- files.append(file)
+ for cfile in compressnames("tree::%s" % (tree), "Packages", filepath):
+ files.append(cfile)
add_tiffani(files, Cnf["Dir::Root"] + tree, filepath)
if arch == "source":
for arch in AptCnf["tree::%s/%s::Architectures" % (tree,dis)].split():
if arch != "source": # always true
- for file in compressnames("tree::%s/%s" % (tree,dis),
+ for cfile in compressnames("tree::%s/%s" % (tree,dis),
"Packages",
"%s/%s/binary-%s/Packages" % (dis, sec, arch)):
- files.append(file)
+ files.append(cfile)
elif AptCnf.has_key("tree::%s::FakeDI" % (tree)):
usetree = AptCnf["tree::%s::FakeDI" % (tree)]
sec = AptCnf["tree::%s/main::Sections" % (usetree)].split()[0]
for arch in AptCnf["tree::%s/main::Architectures" % (usetree)].split():
if arch != "source": # always true
- for file in compressnames("tree::%s/main" % (usetree), "Packages", "main/%s/binary-%s/Packages" % (sec, arch)):
- files.append(file)
+ for cfile in compressnames("tree::%s/main" % (usetree), "Packages", "main/%s/binary-%s/Packages" % (sec, arch)):
+ files.append(cfile)
elif AptCnf.has_key("bindirectory::%s" % (tree)):
- for file in compressnames("bindirectory::%s" % (tree), "Packages", AptCnf["bindirectory::%s::Packages" % (tree)]):
- files.append(file.replace(tree+"/","",1))
- for file in compressnames("bindirectory::%s" % (tree), "Sources", AptCnf["bindirectory::%s::Sources" % (tree)]):
- files.append(file.replace(tree+"/","",1))
+ for cfile in compressnames("bindirectory::%s" % (tree), "Packages", AptCnf["bindirectory::%s::Packages" % (tree)]):
+ files.append(cfile.replace(tree+"/","",1))
+ for cfile in compressnames("bindirectory::%s" % (tree), "Sources", AptCnf["bindirectory::%s::Sources" % (tree)]):
+ files.append(cfile.replace(tree+"/","",1))
else:
print "ALERT: no tree/bindirectory for %s" % (tree)
###############################################################################
def do_sources(sources, suite, component, server):
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
(result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (sources, temp_filename))
if (result != 0):
utils.fubar("Gunzip invocation failed!\n%s" % (output), result)
byname = {}
byid = {}
q = projectB.query("SELECT id, uid, name FROM uid")
- for (id, uid, name) in q.getresult():
- byname[uid] = (id, name)
- byid[id] = (uid, name)
+ for (keyid, uid, name) in q.getresult():
+ byname[uid] = (keyid, name)
+ byid[keyid] = (uid, name)
return (byname, byid)
def get_fingerprint_info():
uid = entry["uid"][0]
name = get_ldap_name(entry)
fingerprints = entry["keyFingerPrint"]
- id = None
+ keyid = None
for f in fingerprints:
key = fpr_lookup.get(f, None)
if key not in keys: continue
keys[key]["uid"] = uid
- if id != None: continue
- id = database.get_or_set_uid_id(uid)
- byuid[id] = (uid, name)
- byname[uid] = (id, name)
+ if keyid != None: continue
+ keyid = database.get_or_set_uid_id(uid)
+ byuid[keyid] = (uid, name)
+ byname[uid] = (keyid, name)
return (byname, byuid)
keys[x]["uid"] = format % "invalid-uid"
else:
uid = format % keys[x]["email"]
- id = database.get_or_set_uid_id(uid)
- byuid[id] = (uid, keys[x]["name"])
- byname[uid] = (id, keys[x]["name"])
+ keyid = database.get_or_set_uid_id(uid)
+ byuid[keyid] = (uid, keys[x]["name"])
+ byname[uid] = (keyid, keys[x]["name"])
keys[x]["uid"] = uid
if any_invalid:
uid = format % "invalid-uid"
- id = database.get_or_set_uid_id(uid)
- byuid[id] = (uid, "ungeneratable user id")
- byname[uid] = (id, "ungeneratable user id")
+ keyid = database.get_or_set_uid_id(uid)
+ byuid[keyid] = (uid, "ungeneratable user id")
+ byname[uid] = (keyid, "ungeneratable user id")
return (byname, byuid)
################################################################################
(db_uid_byname, db_uid_byid) = get_uid_info()
### Update full names of applicable users
- for id in desuid_byid.keys():
- uid = (id, desuid_byid[id][0])
- name = desuid_byid[id][1]
- oname = db_uid_byid[id][1]
+ for keyid in desuid_byid.keys():
+ uid = (keyid, desuid_byid[keyid][0])
+ name = desuid_byid[keyid][1]
+ oname = db_uid_byid[keyid][1]
if name and oname != name:
changes.append((uid[1], "Full name: %s" % (name)))
projectB.query("UPDATE uid SET name = '%s' WHERE id = %s" %
- (pg.escape_string(name), id))
+ (pg.escape_string(name), keyid))
# The fingerprint table (fpr) points to a uid and a keyring.
# If the uid is being decided here (ldap/generate) we set it to it.
fpr = {}
for z in keyring.keys.keys():
- id = db_uid_byname.get(keyring.keys[z].get("uid", None), [None])[0]
- if id == None:
- id = db_fin_info.get(keyring.keys[z]["fingerprints"][0], [None])[0]
+ keyid = db_uid_byname.get(keyring.keys[z].get("uid", None), [None])[0]
+ if keyid == None:
+ keyid = db_fin_info.get(keyring.keys[z]["fingerprints"][0], [None])[0]
for y in keyring.keys[z]["fingerprints"]:
- fpr[y] = (id,keyring_id)
+ fpr[y] = (keyid,keyring_id)
# For any keys that used to be in this keyring, disassociate them.
# We don't change the uid, leaving that for historical info; if
print "Assigning %s to 0x%s." % (uid, fingerprint)
elif existing_uid == uid:
pass
- elif '@' not in existing_ui:
+ elif '@' not in existing_uid:
q = projectB.query("UPDATE fingerprint SET uid = %s WHERE id = %s" % (uid_id, fingerprint_id))
print "Promoting DM %s to DD %s with keyid 0x%s." % (existing_uid, uid, fingerprint)
else:
# Process any additional Maintainer files (e.g. from pseudo packages)
for filename in extra_files:
- file = utils.open_file(filename)
- for line in file.readlines():
+ extrafile = utils.open_file(filename)
+ for line in extrafile.readlines():
line = utils.re_comments.sub('', line).strip()
if line == "":
continue
if not packages.has_key(package) or version == '*' \
or apt_pkg.VersionCompare(packages[package]["version"], version) < 0:
packages[package] = { "maintainer": maintainer, "version": version }
- file.close()
+ extrafile.close()
package_keys = packages.keys()
package_keys.sort()
override_suite = Cnf["Suite::%s::OverrideCodeName" % (suite)]
for component in Cnf.SubTree("Component").List():
if component == "mixed":
- continue; # Ick
+ continue # Ick
for otype in Cnf.ValueList("OverrideType"):
if otype == "deb":
suffix = ""
elif otype == "udeb":
if component == "contrib":
- continue; # Ick2
+ continue # Ick2
suffix = ".debian-installer"
elif otype == "dsc":
suffix = ".src"
output = utils.open_file(filename, "w")
# Generate the final list of files
files = {}
- for id in list:
- path = packages[id]["path"]
- filename = packages[id]["filename"]
- file_id = packages[id]["file_id"]
+ for fileid in list:
+ path = packages[fileid]["path"]
+ filename = packages[fileid]["filename"]
+ file_id = packages[fileid]["file_id"]
if suite == "stable" and dislocated_files.has_key(file_id):
filename = dislocated_files[file_id]
else:
keys = files.keys()
keys.sort()
# Write the list of files out
- for file in keys:
- output.write(file+'\n')
+ for outfile in keys:
+ output.write(outfile+'\n')
output.close()
############################################################
output = utils.open_file(filename, "w")
# Generate the final list of files
files = {}
- for id in list:
- path = packages[id]["path"]
- filename = packages[id]["filename"]
- file_id = packages[id]["file_id"]
- pkg = packages[id]["pkg"]
+ for fileid in list:
+ path = packages[fileid]["path"]
+ filename = packages[fileid]["filename"]
+ file_id = packages[fileid]["file_id"]
+ pkg = packages[fileid]["pkg"]
if suite == "stable" and dislocated_files.has_key(file_id):
filename = dislocated_files[file_id]
else:
suite = packages[unique_id]["suite"]
component = packages[unique_id]["component"]
arch = packages[unique_id]["arch"]
- type = packages[unique_id]["type"]
+ packagetype = packages[unique_id]["filetype"]
d.setdefault(suite, {})
d[suite].setdefault(component, {})
d[suite][component].setdefault(arch, {})
- d[suite][component][arch].setdefault(type, [])
- d[suite][component][arch][type].append(unique_id)
+ d[suite][component][arch].setdefault(packagetype, [])
+ d[suite][component][arch][packagetype].append(unique_id)
# Flesh out the index
if not Options["Suite"]:
suites = Cnf.SubTree("Suite").List()
else:
components = utils.split_args(Options["Component"])
udeb_components = Cnf.ValueList("Suite::%s::UdebComponents" % (suite))
- udeb_components = udeb_components
for component in components:
d[suite].setdefault(component, {})
if component in udeb_components:
types = [ "dsc" ]
else:
types = binary_types
- for type in types:
- d[suite][component][arch].setdefault(type, [])
+ for packagetype in types:
+ d[suite][component][arch].setdefault(packagetype, [])
# Then walk it
for suite in d.keys():
if Cnf.has_key("Suite::%s::Components" % (suite)):
for arch in d[suite][component].keys():
if arch == "all":
continue
- for type in d[suite][component][arch].keys():
- list = d[suite][component][arch][type]
+ for packagetype in d[suite][component][arch].keys():
+ filelist = d[suite][component][arch][packagetype]
# If it's a binary, we need to add in the arch: all debs too
if arch != "source":
archall_suite = Cnf.get("Make-Suite-File-List::ArchAllMap::%s" % (suite))
if archall_suite:
- list.extend(d[archall_suite][component]["all"][type])
+ filelist.extend(d[archall_suite][component]["all"][packagetype])
elif d[suite][component].has_key("all") and \
- d[suite][component]["all"].has_key(type):
- list.extend(d[suite][component]["all"][type])
- write_filelist(suite, component, arch, type, list,
+ d[suite][component]["all"].has_key(packagetype):
+ filelist.extend(d[suite][component]["all"][packagetype])
+ write_filelist(suite, component, arch, packagetype, filelist,
packages, dislocated_files)
else: # legacy-mixed suite
- list = []
+ filelist = []
for component in d[suite].keys():
for arch in d[suite][component].keys():
- for type in d[suite][component][arch].keys():
- list.extend(d[suite][component][arch][type])
- write_legacy_mixed_filelist(suite, list, packages, dislocated_files)
+ for packagetype in d[suite][component][arch].keys():
+ filelist.extend(d[suite][component][arch][packagetype])
+ write_legacy_mixed_filelist(suite, filelist, packages, dislocated_files)
################################################################################
packages = {}
unique_id = 0
for i in ql:
- (id, pkg, arch, version, path, filename, component, file_id, suite, type) = i
+ (sourceid, pkg, arch, version, path, filename, component, file_id, suite, filetype) = i
# 'id' comes from either 'binaries' or 'source', so it's not unique
unique_id += 1
- packages[unique_id] = Dict(id=id, pkg=pkg, arch=arch, version=version,
+ packages[unique_id] = Dict(sourceid=sourceid, pkg=pkg, arch=arch, version=version,
path=path, filename=filename,
component=component, file_id=file_id,
- suite=suite, type = type)
+ suite=suite, filetype = filetype)
cleanup(packages)
write_filelists(packages, dislocated_files)
+++ /dev/null
-#!/usr/bin/env python
-
-# Prepare and maintain partial trees by architecture
-# Copyright (C) 2004, 2006 Daniel Silverstone <dsilvers@digital-scurf.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-
-###############################################################################
-## <kinnison> So Martin, do you have a quote for me yet?
-## <tbm> Make something damned stupid up and attribute it to me, that's okay
-###############################################################################
-
-import sys
-import apt_pkg
-
-from stat import S_ISDIR, S_ISLNK, S_ISREG
-import os
-import cPickle
-
-import daklib.utils
-
-## Master path is the main repository
-#MASTER_PATH = "/org/ftp.debian.org/scratch/dsilvers/master"
-
-MASTER_PATH = "***Configure Mirror-Split::FTPPath Please***"
-TREE_ROOT = "***Configure Mirror-Split::TreeRootPath Please***"
-TREE_DB_ROOT = "***Configure Mirror-Split::TreeDatabasePath Please***"
-trees = []
-
-Cnf = None
-
-###############################################################################
-# A MirrorSplitTarget is a representation of a target. It is a set of archs, a path
-# and whether or not the target includes source.
-##################
-
-class MirrorSplitTarget:
- def __init__(self, name, archs, source):
- self.name = name
- self.root = "%s/%s" % (TREE_ROOT,name)
- self.archs = archs.split(",")
- self.source = source
- self.dbpath = "%s/%s.db" % (TREE_DB_ROOT,name)
- self.db = MirrorSplitDB()
- if os.path.exists( self.dbpath ):
- self.db.load_from_file( self.dbpath )
-
- ## Save the db back to disk
- def save_db(self):
- self.db.save_to_file( self.dbpath )
-
- ## Returns true if it's a poolish match
- def poolish_match(self, path):
- for a in self.archs:
- if path.endswith( "_%s.deb" % (a) ):
- return 1
- if path.endswith( "_%s.udeb" % (a) ):
- return 1
- if self.source:
- if (path.endswith( ".tar.gz" ) or
- path.endswith( ".diff.gz" ) or
- path.endswith( ".dsc" )):
- return 1
- return 0
-
- ## Returns false if it's a badmatch distswise
- def distish_match(self,path):
- for a in self.archs:
- if path.endswith("/Contents-%s.gz" % (a)):
- return 1
- if path.find("/binary-%s/" % (a)) != -1:
- return 1
- if path.find("/installer-%s/" % (a)) != -1:
- return 1
- if path.find("/source/") != -1:
- if self.source:
- return 1
- else:
- return 0
- if path.find("/Contents-") != -1:
- return 0
- if path.find("/binary-") != -1:
- return 0
- if path.find("/installer-") != -1:
- return 0
- return 1
-
-##############################################################################
-# The applicable function is basically a predicate. Given a path and a
-# target object its job is to decide if the path conforms for the
-# target and thus is wanted.
-#
-# 'verbatim' is a list of files which are copied regardless
-# it should be loaded from a config file eventually
-##################
-
-verbatim = [
- ]
-
-verbprefix = [
- "/tools/",
- "/README",
- "/doc/"
- ]
-
-def applicable(path, target):
- if path.startswith("/pool/"):
- return target.poolish_match(path)
- if (path.startswith("/dists/") or
- path.startswith("/project/experimental/")):
- return target.distish_match(path)
- if path in verbatim:
- return 1
- for prefix in verbprefix:
- if path.startswith(prefix):
- return 1
- return 0
-
-
-##############################################################################
-# A MirrorSplitDir is a representation of a tree.
-# It distinguishes files dirs and links
-# Dirs are dicts of (name, MirrorSplitDir)
-# Files are dicts of (name, inode)
-# Links are dicts of (name, target)
-##############
-
-class MirrorSplitDir:
- def __init__(self):
- self.dirs = {}
- self.files = {}
- self.links = {}
-
-##############################################################################
-# A MirrorSplitDB is a container for a MirrorSplitDir...
-##############
-
-class MirrorSplitDB:
- ## Initialise a MirrorSplitDB as containing nothing
- def __init__(self):
- self.root = MirrorSplitDir()
-
- def _internal_recurse(self, path):
- bdir = MirrorSplitDir()
- dl = os.listdir( path )
- dl.sort()
- dirs = []
- for ln in dl:
- lnl = os.lstat( "%s/%s" % (path, ln) )
- if S_ISDIR(lnl[0]):
- dirs.append(ln)
- elif S_ISLNK(lnl[0]):
- bdir.links[ln] = os.readlink( "%s/%s" % (path, ln) )
- elif S_ISREG(lnl[0]):
- bdir.files[ln] = lnl[1]
- else:
- daklib.utils.fubar( "Confused by %s/%s -- not a dir, link or file" %
- ( path, ln ) )
- for d in dirs:
- bdir.dirs[d] = self._internal_recurse( "%s/%s" % (path,d) )
-
- return bdir
-
- ## Recurse through a given path, setting the sequence accordingly
- def init_from_dir(self, dirp):
- self.root = self._internal_recurse( dirp )
-
- ## Load this MirrorSplitDB from file
- def load_from_file(self, fname):
- f = open(fname, "r")
- self.root = cPickle.load(f)
- f.close()
-
- ## Save this MirrorSplitDB to a file
- def save_to_file(self, fname):
- f = open(fname, "w")
- cPickle.dump( self.root, f, 1 )
- f.close()
-
-
-##############################################################################
-# Helper functions for the tree syncing...
-##################
-
-def _pth(a,b):
- return "%s/%s" % (a,b)
-
-def do_mkdir(targ,path):
- if not os.path.exists( _pth(targ.root, path) ):
- os.makedirs( _pth(targ.root, path) )
-
-def do_mkdir_f(targ,path):
- do_mkdir(targ, os.path.dirname(path))
-
-def do_link(targ,path):
- do_mkdir_f(targ,path)
- os.link( _pth(MASTER_PATH, path),
- _pth(targ.root, path))
-
-def do_symlink(targ,path,link):
- do_mkdir_f(targ,path)
- os.symlink( link, _pth(targ.root, path) )
-
-def do_unlink(targ,path):
- os.unlink( _pth(targ.root, path) )
-
-def do_unlink_dir(targ,path):
- os.system( "rm -Rf '%s'" % _pth(targ.root, path) )
-
-##############################################################################
-# Reconciling a target with the sourcedb
-################
-
-def _internal_reconcile( path, srcdir, targdir, targ ):
- # Remove any links in targdir which aren't in srcdir
- # Or which aren't applicable
- rm = []
- for k in targdir.links.keys():
- if applicable( _pth(path, k), targ ):
- if not srcdir.links.has_key(k):
- rm.append(k)
- else:
- rm.append(k)
- for k in rm:
- #print "-L-", _pth(path,k)
- do_unlink(targ, _pth(path,k))
- del targdir.links[k]
-
- # Remove any files in targdir which aren't in srcdir
- # Or which aren't applicable
- rm = []
- for k in targdir.files.keys():
- if applicable( _pth(path, k), targ ):
- if not srcdir.files.has_key(k):
- rm.append(k)
- else:
- rm.append(k)
- for k in rm:
- #print "-F-", _pth(path,k)
- do_unlink(targ, _pth(path,k))
- del targdir.files[k]
-
- # Remove any dirs in targdir which aren't in srcdir
- rm = []
- for k in targdir.dirs.keys():
- if not srcdir.dirs.has_key(k):
- rm.append(k)
- for k in rm:
- #print "-D-", _pth(path,k)
- do_unlink_dir(targ, _pth(path,k))
- del targdir.dirs[k]
-
- # Add/update files
- for k in srcdir.files.keys():
- if applicable( _pth(path,k), targ ):
- if not targdir.files.has_key(k):
- #print "+F+", _pth(path,k)
- do_link( targ, _pth(path,k) )
- targdir.files[k] = srcdir.files[k]
- else:
- if targdir.files[k] != srcdir.files[k]:
- #print "*F*", _pth(path,k)
- do_unlink( targ, _pth(path,k) )
- do_link( targ, _pth(path,k) )
- targdir.files[k] = srcdir.files[k]
-
- # Add/update links
- for k in srcdir.links.keys():
- if applicable( _pth(path,k), targ ):
- if not targdir.links.has_key(k):
- targdir.links[k] = srcdir.links[k];
- #print "+L+",_pth(path,k), "->", srcdir.links[k]
- do_symlink( targ, _pth(path,k), targdir.links[k] )
- else:
- if targdir.links[k] != srcdir.links[k]:
- do_unlink( targ, _pth(path,k) )
- targdir.links[k] = srcdir.links[k]
- #print "*L*", _pth(path,k), "to ->", srcdir.links[k]
- do_symlink( targ, _pth(path,k), targdir.links[k] )
-
- # Do dirs
- for k in srcdir.dirs.keys():
- if not targdir.dirs.has_key(k):
- targdir.dirs[k] = MirrorSplitDir()
- #print "+D+", _pth(path,k)
- _internal_reconcile( _pth(path,k), srcdir.dirs[k],
- targdir.dirs[k], targ )
-
-
-def reconcile_target_db( src, targ ):
- _internal_reconcile( "", src.root, targ.db.root, targ )
-
-###############################################################################
-
-def load_config():
- global MASTER_PATH
- global TREE_ROOT
- global TREE_DB_ROOT
- global trees
-
- MASTER_PATH = Cnf["Mirror-Split::FTPPath"]
- TREE_ROOT = Cnf["Mirror-Split::TreeRootPath"]
- TREE_DB_ROOT = Cnf["Mirror-Split::TreeDatabasePath"]
-
- for a in Cnf.ValueList("Mirror-Split::BasicTrees"):
- trees.append( MirrorSplitTarget( a, "%s,all" % a, 1 ) )
-
- for n in Cnf.SubTree("Mirror-Split::CombinationTrees").List():
- archs = Cnf.ValueList("Mirror-Split::CombinationTrees::%s" % n)
- source = 0
- if "source" in archs:
- source = 1
- archs.remove("source")
- archs = ",".join(archs)
- trees.append( MirrorSplitTarget( n, archs, source ) )
-
-def do_list ():
- print "Master path",MASTER_PATH
- print "Trees at",TREE_ROOT
- print "DBs at",TREE_DB_ROOT
-
- for tree in trees:
- print tree.name,"contains",", ".join(tree.archs),
- if tree.source:
- print " [source]"
- else:
- print ""
-
-def do_help ():
- print """Usage: dak mirror-split [OPTIONS]
-Generate hardlink trees of certain architectures
-
- -h, --help show this help and exit
- -l, --list list the configuration and exit
-"""
-
-
-def main ():
- global Cnf
-
- Cnf = daklib.utils.get_conf()
-
- Arguments = [('h',"help","Mirror-Split::Options::Help"),
- ('l',"list","Mirror-Split::Options::List"),
- ]
-
- arguments = apt_pkg.ParseCommandLine(Cnf,Arguments,sys.argv)
- Cnf["Mirror-Split::Options::cake"] = ""
- Options = Cnf.SubTree("Mirror-Split::Options")
-
- print "Loading configuration..."
- load_config()
- print "Loaded."
-
- if Options.has_key("Help"):
- do_help()
- return
- if Options.has_key("List"):
- do_list()
- return
-
-
- src = MirrorSplitDB()
- print "Scanning", MASTER_PATH
- src.init_from_dir(MASTER_PATH)
- print "Scanned"
-
- for tree in trees:
- print "Reconciling tree:",tree.name
- reconcile_target_db( src, tree )
- print "Saving updated DB...",
- tree.save_db()
- print "Done"
-
-##############################################################################
-
-if __name__ == '__main__':
- main()
from daklib import database
import apt_pkg, os, sys, pwd, time, re, commands
-re_taint_free = re.compile(r"^['/;\-\+\.~\s\w]+$");
+re_taint_free = re.compile(r"^['/;\-\+\.~\s\w]+$")
Cnf = None
Options = None
# Retrieve current section/priority...
oldsection, oldsourcesection, oldpriority = None, None, None
- for type in ['source', 'binary']:
+ for packagetype in ['source', 'binary']:
eqdsc = '!='
- if type == 'source':
+ if packagetype == 'source':
eqdsc = '='
q = projectB.query("""
SELECT priority.priority AS prio, section.section AS sect, override_type.type AS type
utils.fubar("%s is ambiguous. Matches %d packages" % (package,q.ntuples()))
r = q.getresult()
- if type == 'binary':
+ if packagetype == 'binary':
oldsection = r[0][1]
oldpriority = r[0][0]
else:
def check():
propogate={}
nopropogate={}
- for file in files.keys():
+ for checkfile in files.keys():
# The .orig.tar.gz can disappear out from under us is it's a
# duplicate of one in the archive.
- if not files.has_key(file):
+ if not files.has_key(checkfile):
continue
# Check that the source still exists
- if files[file]["type"] == "deb":
- source_version = files[file]["source version"]
- source_package = files[file]["source package"]
+ if files[checkfile]["type"] == "deb":
+ source_version = files[checkfile]["source version"]
+ source_package = files[checkfile]["source package"]
if not changes["architecture"].has_key("source") \
and not Upload.source_exists(source_package, source_version, changes["distribution"].keys()):
- reject("no source found for %s %s (%s)." % (source_package, source_version, file))
+ reject("no source found for %s %s (%s)." % (source_package, source_version, checkfile))
# Version and file overwrite checks
if not installing_to_stable:
- if files[file]["type"] == "deb":
- reject(Upload.check_binary_against_db(file), "")
- elif files[file]["type"] == "dsc":
- reject(Upload.check_source_against_db(file), "")
- (reject_msg, is_in_incoming) = Upload.check_dsc_against_db(file)
+ if files[checkfile]["type"] == "deb":
+ reject(Upload.check_binary_against_db(checkfile), "")
+ elif files[checkfile]["type"] == "dsc":
+ reject(Upload.check_source_against_db(checkfile), "")
+ (reject_msg, is_in_incoming) = Upload.check_dsc_against_db(checkfile)
reject(reject_msg, "")
# propogate in the case it is in the override tables:
if changes.has_key("propdistribution"):
for suite in changes["propdistribution"].keys():
- if Upload.in_override_p(files[file]["package"], files[file]["component"], suite, files[file].get("dbtype",""), file):
+ if Upload.in_override_p(files[checkfile]["package"], files[checkfile]["component"], suite, files[checkfile].get("dbtype",""), checkfile):
propogate[suite] = 1
else:
nopropogate[suite] = 1
continue
changes["distribution"][suite] = 1
- for file in files.keys():
+ for checkfile in files.keys():
# Check the package is still in the override tables
for suite in changes["distribution"].keys():
- if not Upload.in_override_p(files[file]["package"], files[file]["component"], suite, files[file].get("dbtype",""), file):
- reject("%s is NEW for %s." % (file, suite))
+ if not Upload.in_override_p(files[checkfile]["package"], files[checkfile]["component"], suite, files[checkfile].get("dbtype",""), checkfile):
+ reject("%s is NEW for %s." % (checkfile, suite))
###############################################################################
return
# Add the .dsc file to the DB
- for file in files.keys():
- if files[file]["type"] == "dsc":
+ for newfile in files.keys():
+ if files[newfile]["type"] == "dsc":
package = dsc["source"]
version = dsc["version"] # NB: not files[file]["version"], that has no epoch
maintainer = dsc["maintainer"]
changedby_id = database.get_or_set_maintainer_id(changedby)
fingerprint_id = database.get_or_set_fingerprint_id(dsc["fingerprint"])
install_date = time.strftime("%Y-%m-%d")
- filename = files[file]["pool name"] + file
- dsc_component = files[file]["component"]
- dsc_location_id = files[file]["location id"]
+ filename = files[newfile]["pool name"] + newfile
+ dsc_component = files[newfile]["component"]
+ dsc_location_id = files[newfile]["location id"]
if dsc.has_key("dm-upload-allowed") and dsc["dm-upload-allowed"] == "yes":
dm_upload_allowed = "true"
else:
dm_upload_allowed = "false"
- if not files[file].has_key("files id") or not files[file]["files id"]:
- files[file]["files id"] = database.set_files_id (filename, files[file]["size"], files[file]["md5sum"], files[file]["sha1sum"], files[file]["sha256sum"], dsc_location_id)
+ if not files[newfile].has_key("files id") or not files[newfile]["files id"]:
+ files[newfile]["files id"] = database.set_files_id (filename, files[newfile]["size"], files[newfile]["md5sum"], files[newfile]["sha1sum"], files[newfile]["sha256sum"], dsc_location_id)
projectB.query("INSERT INTO source (source, version, maintainer, changedby, file, install_date, sig_fpr, dm_upload_allowed) VALUES ('%s', '%s', %d, %d, %d, '%s', %s, %s)"
- % (package, version, maintainer_id, changedby_id, files[file]["files id"], install_date, fingerprint_id, dm_upload_allowed))
+ % (package, version, maintainer_id, changedby_id, files[newfile]["files id"], install_date, fingerprint_id, dm_upload_allowed))
for suite in changes["distribution"].keys():
suite_id = database.get_suite_id(suite)
projectB.query("INSERT INTO src_associations (suite, source) VALUES (%d, currval('source_id_seq'))" % (suite_id))
# Add the source files to the DB (files and dsc_files)
- projectB.query("INSERT INTO dsc_files (source, file) VALUES (currval('source_id_seq'), %d)" % (files[file]["files id"]))
+ projectB.query("INSERT INTO dsc_files (source, file) VALUES (currval('source_id_seq'), %d)" % (files[newfile]["files id"]))
for dsc_file in dsc_files.keys():
- filename = files[file]["pool name"] + dsc_file
+ filename = files[newfile]["pool name"] + dsc_file
# If the .orig.tar.gz is already in the pool, it's
# files id is stored in dsc_files by check_dsc().
files_id = dsc_files[dsc_file].get("files id", None)
# Add the .deb files to the DB
- for file in files.keys():
- if files[file]["type"] == "deb":
- package = files[file]["package"]
- version = files[file]["version"]
- maintainer = files[file]["maintainer"]
+ for newfile in files.keys():
+ if files[newfile]["type"] == "deb":
+ package = files[newfile]["package"]
+ version = files[newfile]["version"]
+ maintainer = files[newfile]["maintainer"]
maintainer = maintainer.replace("'", "\\'")
maintainer_id = database.get_or_set_maintainer_id(maintainer)
fingerprint_id = database.get_or_set_fingerprint_id(changes["fingerprint"])
- architecture = files[file]["architecture"]
+ architecture = files[newfile]["architecture"]
architecture_id = database.get_architecture_id (architecture)
- type = files[file]["dbtype"]
- source = files[file]["source package"]
- source_version = files[file]["source version"]
- filename = files[file]["pool name"] + file
- if not files[file].has_key("location id") or not files[file]["location id"]:
- files[file]["location id"] = database.get_location_id(Cnf["Dir::Pool"],files[file]["component"],utils.where_am_i())
- if not files[file].has_key("files id") or not files[file]["files id"]:
- files[file]["files id"] = database.set_files_id (filename, files[file]["size"], files[file]["md5sum"], files[file]["sha1sum"], files[file]["sha256sum"], files[file]["location id"])
+ filetype = files[newfile]["dbtype"]
+ source = files[newfile]["source package"]
+ source_version = files[newfile]["source version"]
+ filename = files[newfile]["pool name"] + newfile
+ if not files[newfile].has_key("location id") or not files[newfile]["location id"]:
+ files[newfile]["location id"] = database.get_location_id(Cnf["Dir::Pool"],files[newfile]["component"],utils.where_am_i())
+ if not files[newfile].has_key("files id") or not files[newfile]["files id"]:
+ files[newfile]["files id"] = database.set_files_id (filename, files[newfile]["size"], files[newfile]["md5sum"], files[newfile]["sha1sum"], files[newfile]["sha256sum"], files[newfile]["location id"])
source_id = database.get_source_id (source, source_version)
if source_id:
projectB.query("INSERT INTO binaries (package, version, maintainer, source, architecture, file, type, sig_fpr) VALUES ('%s', '%s', %d, %d, %d, %d, '%s', %d)"
- % (package, version, maintainer_id, source_id, architecture_id, files[file]["files id"], type, fingerprint_id))
+ % (package, version, maintainer_id, source_id, architecture_id, files[newfile]["files id"], filetype, fingerprint_id))
else:
- raise NoSourceFieldError, "Unable to find a source id for %s (%s), %s, file %s, type %s, signed by %s" % (package, version, architecture, file, type, sig_fpr)
+ raise NoSourceFieldError, "Unable to find a source id for %s (%s), %s, file %s, type %s, signed by %s" % (package, version, architecture, newfile, filetype, changes["fingerprint"])
for suite in changes["distribution"].keys():
suite_id = database.get_suite_id(suite)
projectB.query("INSERT INTO bin_associations (suite, bin) VALUES (%d, currval('binaries_id_seq'))" % (suite_id))
continue
# First move the files to the new location
legacy_filename = qid["path"] + qid["filename"]
- pool_location = utils.poolify (changes["source"], files[file]["component"])
+ pool_location = utils.poolify (changes["source"], files[newfile]["component"])
pool_filename = pool_location + os.path.basename(qid["filename"])
destination = Cnf["Dir::Pool"] + pool_location
utils.move(legacy_filename, destination)
projectB.query("UPDATE dsc_files SET file = %s WHERE source = %s AND file = %s" % (new_files_id, database.get_source_id(changes["source"], changes["version"]), orig_tar_id))
# Install the files into the pool
- for file in files.keys():
- destination = Cnf["Dir::Pool"] + files[file]["pool name"] + file
- utils.move(file, destination)
- Logger.log(["installed", file, files[file]["type"], files[file]["size"], files[file]["architecture"]])
- install_bytes += float(files[file]["size"])
+ for newfile in files.keys():
+ destination = Cnf["Dir::Pool"] + files[newfile]["pool name"] + newfile
+ utils.move(newfile, destination)
+ Logger.log(["installed", newfile, files[newfile]["type"], files[newfile]["size"], files[newfile]["architecture"]])
+ install_bytes += float(files[newfile]["size"])
# Copy the .changes file across for suite which need it.
copy_changes = {}
dest_dir = Cnf["Dir::QueueBuild"]
if Cnf.FindB("Dinstall::SecurityQueueBuild"):
dest_dir = os.path.join(dest_dir, suite)
- for file in files.keys():
- dest = os.path.join(dest_dir, file)
+ for newfile in files.keys():
+ dest = os.path.join(dest_dir, newfile)
# Remove it from the list of packages for later processing by apt-ftparchive
projectB.query("UPDATE queue_build SET in_queue = 'f', last_used = '%s' WHERE filename = '%s' AND suite = %s" % (now_date, dest, suite_id))
if not Cnf.FindB("Dinstall::SecurityQueueBuild"):
# Update the symlink to point to the new location in the pool
- pool_location = utils.poolify (changes["source"], files[file]["component"])
- src = os.path.join(Cnf["Dir::Pool"], pool_location, os.path.basename(file))
+ pool_location = utils.poolify (changes["source"], files[newfile]["component"])
+ src = os.path.join(Cnf["Dir::Pool"], pool_location, os.path.basename(newfile))
if os.path.islink(dest):
os.unlink(dest)
os.symlink(src, dest)
projectB.query("BEGIN WORK")
# Add the source to stable (and remove it from proposed-updates)
- for file in files.keys():
- if files[file]["type"] == "dsc":
+ for newfile in files.keys():
+ if files[newfile]["type"] == "dsc":
package = dsc["source"]
version = dsc["version"]; # NB: not files[file]["version"], that has no epoch
q = projectB.query("SELECT id FROM source WHERE source = '%s' AND version = '%s'" % (package, version))
projectB.query("INSERT INTO src_associations (suite, source) VALUES ('%s', '%s')" % (suite_id, source_id))
# Add the binaries to stable (and remove it/them from proposed-updates)
- for file in files.keys():
- if files[file]["type"] == "deb":
- package = files[file]["package"]
- version = files[file]["version"]
- architecture = files[file]["architecture"]
+ for newfile in files.keys():
+ if files[newfile]["type"] == "deb":
+ package = files[newfile]["package"]
+ version = files[newfile]["version"]
+ architecture = files[newfile]["architecture"]
q = projectB.query("SELECT b.id FROM binaries b, architecture a WHERE b.package = '%s' AND b.version = '%s' AND (a.arch_string = '%s' OR a.arch_string = 'all') AND b.architecture = a.id" % (package, version, architecture))
ql = q.getresult()
if not ql:
os.unlink (new_changelog_filename)
new_changelog = utils.open_file(new_changelog_filename, 'w')
- for file in files.keys():
- if files[file]["type"] == "deb":
- new_changelog.write("stable/%s/binary-%s/%s\n" % (files[file]["component"], files[file]["architecture"], file))
- elif utils.re_issource.match(file):
- new_changelog.write("stable/%s/source/%s\n" % (files[file]["component"], file))
+ for newfile in files.keys():
+ if files[newfile]["type"] == "deb":
+ new_changelog.write("stable/%s/binary-%s/%s\n" % (files[newfile]["component"], files[newfile]["architecture"], newfile))
+ elif utils.re_issource.match(newfile):
+ new_changelog.write("stable/%s/source/%s\n" % (files[newfile]["component"], newfile))
else:
- new_changelog.write("%s\n" % (file))
+ new_changelog.write("%s\n" % (newfile))
chop_changes = queue.re_fdnic.sub("\n", changes["changes"])
new_changelog.write(chop_changes + '\n\n')
if os.access(changelog_filename, os.R_OK) != 0:
class Section_Completer:
def __init__ (self):
self.sections = []
+ self.matches = []
q = projectB.query("SELECT section FROM section")
for i in q.getresult():
self.sections.append(i[0])
class Priority_Completer:
def __init__ (self):
self.priorities = []
+ self.matches = []
q = projectB.query("SELECT priority FROM priority")
for i in q.getresult():
self.priorities.append(i[0])
def edit_new (new):
# Write the current data to a temporary file
- temp_filename = utils.temp_filename()
- temp_file = utils.open_file(temp_filename, 'w')
+ (fd, temp_filename) = utils.temp_filename()
+ temp_file = os.fdopen(fd, 'w')
print_new (new, 0, temp_file)
temp_file.close()
# Spawn an editor on that file
def edit_note(note):
# Write the current data to a temporary file
- temp_filename = utils.temp_filename()
- temp_file = utils.open_file(temp_filename, 'w')
+ (fd, temp_filename) = utils.temp_filename()
+ temp_file = os.fdopen(fd, 'w')
temp_file.write(note)
temp_file.close()
editor = os.environ.get("EDITOR","vi")
stdout_fd = sys.stdout
try:
sys.stdout = less_fd
- examine_package.display_changes(Upload.pkg.changes_file)
+ changes = utils.parse_changes (Upload.pkg.changes_file)
+ examine_package.display_changes(changes['distribution'], Upload.pkg.changes_file)
files = Upload.pkg.files
for f in files.keys():
if files[f].has_key("new"):
ftype = files[f]["type"]
if ftype == "deb":
- examine_package.check_deb(f)
+ examine_package.check_deb(changes['distribution'], f)
elif ftype == "dsc":
- examine_package.check_dsc(f)
+ examine_package.check_dsc(changes['distribution'], f)
finally:
sys.stdout = stdout_fd
except IOError, e:
def prod_maintainer ():
# Here we prepare an editor and get them ready to prod...
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
editor = os.environ.get("EDITOR","vi")
answer = 'E'
while answer == 'E':
os.system("%s %s" % (editor, temp_filename))
- f = utils.open_file(temp_filename)
+ f = os.fdopen(fd)
prod_message = "".join(f.readlines())
f.close()
print "Prod message:"
q = Upload.projectB.query("SELECT u.uid, u.name, k.debian_maintainer FROM fingerprint f JOIN keyrings k ON (f.keyring=k.id), uid u WHERE f.uid = u.id AND f.fingerprint = '%s'" % (fpr))
qs = q.getresult()
if len(qs) == 0:
- return (None, None)
+ return (None, None, None)
else:
return qs[0]
def do_stableupdate (summary, short_summary):
print "Moving to PROPOSED-UPDATES holding area."
- Logger.log(["Moving to proposed-updates", pkg.changes_file]);
+ Logger.log(["Moving to proposed-updates", pkg.changes_file])
- Upload.dump_vars(Cnf["Dir::Queue::ProposedUpdates"]);
+ Upload.dump_vars(Cnf["Dir::Queue::ProposedUpdates"])
move_to_dir(Cnf["Dir::Queue::ProposedUpdates"], perms=0664)
# Check for override disparities
- Upload.Subst["__SUMMARY__"] = summary;
- Upload.check_override();
+ Upload.Subst["__SUMMARY__"] = summary
+ Upload.check_override()
################################################################################
def do_oldstableupdate (summary, short_summary):
print "Moving to OLDSTABLE-PROPOSED-UPDATES holding area."
- Logger.log(["Moving to oldstable-proposed-updates", pkg.changes_file]);
+ Logger.log(["Moving to oldstable-proposed-updates", pkg.changes_file])
- Upload.dump_vars(Cnf["Dir::Queue::OldProposedUpdates"]);
+ Upload.dump_vars(Cnf["Dir::Queue::OldProposedUpdates"])
move_to_dir(Cnf["Dir::Queue::OldProposedUpdates"], perms=0664)
# Check for override disparities
- Upload.Subst["__SUMMARY__"] = summary;
- Upload.check_override();
+ Upload.Subst["__SUMMARY__"] = summary
+ Upload.check_override()
################################################################################
# If we weren't given a manual rejection message, spawn an editor
# so the user can add one in...
if not reject_message:
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
editor = os.environ.get("EDITOR","vi")
answer = 'E'
while answer == 'E':
os.system("%s %s" % (editor, temp_filename))
- f = utils.open_file(temp_filename)
+ f = os.fdopen(fd)
reject_message = "".join(f.readlines())
f.close()
print "Reject message:"
for component in components:
filename = "%s/dists/%s/%s/binary-%s/Packages.gz" % (Cnf["Dir::Root"], suites[0], component, architecture)
# apt_pkg.ParseTagFile needs a real file handle and can't handle a GzipFile instance...
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
(result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
if (result != 0):
utils.fubar("Gunzip invocation failed!\n%s\n" % (output), result)
for component in components:
filename = "%s/dists/%s/%s/source/Sources.gz" % (Cnf["Dir::Root"], suites[0], component)
# apt_pkg.ParseTagFile needs a real file handle and can't handle a GzipFile instance...
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
result, output = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_filename))
if result != 0:
sys.stderr.write("Gunzip invocation failed!\n%s\n" % (output))
# If we don't have a reason; spawn an editor so the user can add one
# Write the rejection email out as the <foo>.reason file
if not Options["Reason"] and not Options["No-Action"]:
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
editor = os.environ.get("EDITOR","vi")
result = os.system("%s %s" % (editor, temp_filename))
if result != 0:
changes = Upload.pkg.changes
changes["suite"] = copy.copy(changes["distribution"])
-
+ distribution = changes["distribution"].keys()[0]
# Find out what's new
new = queue.determine_new(changes, files, projectB, 0)
html_header(changes["source"], filestoexamine)
queue.check_valid(new)
- examine_package.display_changes(Upload.pkg.changes_file)
+ examine_package.display_changes( distribution, Upload.pkg.changes_file)
for fn in filter(lambda fn: fn.endswith(".dsc"), filestoexamine):
- examine_package.check_dsc(fn)
+ examine_package.check_dsc(distribution, fn)
for fn in filter(lambda fn: fn.endswith(".deb") or fn.endswith(".udeb"), filestoexamine):
- examine_package.check_deb(fn)
+ examine_package.check_deb(distribution, fn)
html_footer()
if sys.stdout != stdout_fd:
import psycopg2, sys, fcntl, os
import apt_pkg
import time
+import errno
from daklib import database
from daklib import utils
Cnf = None
projectB = None
-required_database_schema = 1
+required_database_schema = 3
################################################################################
name TEXT UNIQUE NOT NULL,
value TEXT
);""")
- c.execute("INSERT INTO config VALUES ( nextval('config_id_seq'), 'db_revision', '0')");
+ c.execute("INSERT INTO config VALUES ( nextval('config_id_seq'), 'db_revision', '0')")
self.db.commit()
except psycopg2.ProgrammingError:
try:
c = self.db.cursor()
- q = c.execute("SELECT value FROM config WHERE name = 'db_revision';");
+ q = c.execute("SELECT value FROM config WHERE name = 'db_revision';")
return c.fetchone()[0]
except psycopg2.ProgrammingError:
"""
def __init__(self, message=""):
+ Exception.__init__(self)
self.args = str(message)
self.message = str(message)
################################################################################
-import sys, time, types
+import sys
+import time
+import types
################################################################################
################################################################################
def init (config, sql):
+    """ Database module init. Sets the global Cnf and projectB variables. """
global Cnf, projectB
Cnf = config
def do_query(q):
+ """
+ Executes a database query q. Writes statistics to stderr and returns
+ the result.
+
+ """
sys.stderr.write("query: \"%s\" ... " % (q))
before = time.time()
r = projectB.query(q)
################################################################################
def get_suite_id (suite):
+ """ Returns database suite_id for given suite, caches result. """
global suite_id_cache
if suite_id_cache.has_key(suite):
return suite_id
def get_section_id (section):
+ """ Returns database section_id for given section, caches result. """
global section_id_cache
if section_id_cache.has_key(section):
return section_id
def get_priority_id (priority):
+ """ Returns database priority_id for given priority, caches result. """
global priority_id_cache
if priority_id_cache.has_key(priority):
return priority_id
def get_override_type_id (type):
+    """ Returns database override_type_id for the given override type, caches result. """
global override_type_id_cache
if override_type_id_cache.has_key(type):
return override_type_id
def get_architecture_id (architecture):
+ """ Returns database architecture_id for given architecture, caches result. """
global architecture_id_cache
if architecture_id_cache.has_key(architecture):
return architecture_id
def get_archive_id (archive):
+ """ Returns database archive_id for given archive, caches result. """
global archive_id_cache
archive = archive.lower()
return archive_id
def get_component_id (component):
+ """ Returns database component_id for given component, caches result. """
global component_id_cache
component = component.lower()
return component_id
def get_location_id (location, component, archive):
+ """
+ Returns database location_id for given combination of
+ location
+ component
+ archive.
+
+ The 3 parameters are the database ids returned by the respective
+ "get_foo_id" functions.
+
+ The result will be cached.
+
+ """
global location_id_cache
cache_key = location + '_' + component + '_' + location
return location_id
def get_source_id (source, version):
+ """ Returns database source_id for given combination of source and version, caches result. """
global source_id_cache
cache_key = source + '_' + version + '_'
return source_id
def get_suite_version(source, suite):
+ """ Returns database version for a given source in a given suite, caches result. """
global suite_version_cache
cache_key = "%s_%s" % (source, suite)
################################################################################
def get_or_set_maintainer_id (maintainer):
+ """
+ If maintainer does not have an entry in the maintainer table yet, create one
+ and return its id.
+ If maintainer already has an entry, simply return its id.
+
+ Result is cached.
+
+ """
global maintainer_id_cache
if maintainer_id_cache.has_key(maintainer):
################################################################################
def get_or_set_keyring_id (keyring):
+ """
+ If keyring does not have an entry in the keyring table yet, create one
+ and return its id.
+ If keyring already has an entry, simply return its id.
+
+ Result is cached.
+
+ """
global keyring_id_cache
if keyring_id_cache.has_key(keyring):
################################################################################
def get_or_set_uid_id (uid):
+ """
+ If uid does not have an entry in the uid table yet, create one
+ and return its id.
+ If uid already has an entry, simply return its id.
+
+ Result is cached.
+
+ """
global uid_id_cache
if uid_id_cache.has_key(uid):
################################################################################
def get_or_set_fingerprint_id (fingerprint):
+ """
+    If fingerprint does not have an entry in the fingerprint table yet, create one
+ and return its id.
+ If fingerprint already has an entry, simply return its id.
+
+ Result is cached.
+
+ """
global fingerprint_id_cache
if fingerprint_id_cache.has_key(fingerprint):
################################################################################
def get_files_id (filename, size, md5sum, location_id):
+ """
+ Returns -1, -2 or the file_id for a given combination of
+ filename
+ size
+ md5sum
+ location_id.
+
+ The database is queried using filename and location_id, size and md5sum are for
+ extra checks.
+
+ Return values:
+       -1 - The given combination of arguments results in more (or fewer) than
+            one result from the database
+ -2 - The given size and md5sum do not match the values in the database
+ anything else is a file_id
+
+ Result is cached.
+
+ """
global files_id_cache
cache_key = "%s_%d" % (filename, location_id)
################################################################################
def get_or_set_queue_id (queue):
+ """
+ If queue does not have an entry in the queue_name table yet, create one
+ and return its id.
+ If queue already has an entry, simply return its id.
+
+ Result is cached.
+
+ """
global queue_id_cache
if queue_id_cache.has_key(queue):
################################################################################
def set_files_id (filename, size, md5sum, sha1sum, sha256sum, location_id):
+ """
+ Insert a new entry into the files table.
+
+ Returns the new file_id
+
+ """
global files_id_cache
projectB.query("INSERT INTO files (filename, size, md5sum, sha1sum, sha256sum, location) VALUES ('%s', %d, '%s', '%s', '%s', %d)" % (filename, long(size), md5sum, sha1sum, sha256sum, location_id))
################################################################################
def get_maintainer (maintainer_id):
+ """ Return the name of the maintainer behind maintainer_id """
global maintainer_cache
if not maintainer_cache.has_key(maintainer_id):
################################################################################
def get_suites(pkgname, src=False):
+    """ Return the suites that contain pkgname. If src is True, query for a source package, else a binary. """
if src:
- sql = "select suite_name from source, src_associations,suite where source.id=src_associations.source and source.source='%s' and src_associations.suite = suite.id"%pkgname
+ sql = """
+ SELECT suite_name
+ FROM source,
+ src_associations,
+ suite
+ WHERE source.id = src_associations.source
+ AND source.source = '%s'
+ AND src_associations.suite = suite.id
+ """ % (pkgname)
else:
- sql = "select suite_name from binaries, bin_associations,suite where binaries.id=bin_associations.bin and package='%s' and bin_associations.suite = suite.id"%pkgname
+ sql = """
+ SELECT suite_name
+ FROM binaries,
+ bin_associations,
+ suite
+ WHERE binaries.id = bin_associations.bin
+ AND package = '%s'
+ AND bin_associations.suite = suite.id
+ """ % (pkgname)
+
q = projectB.query(sql)
return map(lambda x: x[0], q.getresult())
self.Cnf = Cnf
self.accept_count = 0
self.accept_bytes = 0L
+ self.reject_message = ""
self.pkg = Pkg(changes = {}, dsc = {}, dsc_files = {}, files = {},
legacy_source_untouchable = {})
if not changes.has_key("distribution") or not isinstance(changes["distribution"], DictType):
changes["distribution"] = {}
- override_summary ="";
+ override_summary =""
file_keys = files.keys()
file_keys.sort()
for file_entry in file_keys:
if changes["architecture"].has_key("source") and \
dsc.has_key("bts changelog"):
- temp_filename = utils.temp_filename(Cnf["Dir::Queue::BTSVersionTrack"],
- dotprefix=1, perms=0644)
- version_history = utils.open_file(temp_filename, 'w')
+ (fd, temp_filename) = utils.temp_filename(Cnf["Dir::Queue::BTSVersionTrack"], prefix=".")
+ version_history = os.fdopen(fd, 'w')
version_history.write(dsc["bts changelog"])
version_history.close()
filename = "%s/%s" % (Cnf["Dir::Queue::BTSVersionTrack"],
changes_file[:-8]+".versions")
os.rename(temp_filename, filename)
+ os.chmod(filename, 0644)
# Write out the binary -> source mapping.
- temp_filename = utils.temp_filename(Cnf["Dir::Queue::BTSVersionTrack"],
- dotprefix=1, perms=0644)
- debinfo = utils.open_file(temp_filename, 'w')
+ (fd, temp_filename) = utils.temp_filename(Cnf["Dir::Queue::BTSVersionTrack"], prefix=".")
+ debinfo = os.fdopen(fd, 'w')
for file_entry in file_keys:
f = files[file_entry]
if f["type"] == "deb":
filename = "%s/%s" % (Cnf["Dir::Queue::BTSVersionTrack"],
changes_file[:-8]+".debinfo")
os.rename(temp_filename, filename)
+ os.chmod(filename, 0644)
self.queue_build("accepted", Cnf["Dir::Queue::Accepted"])
# If we weren't given a manual rejection message, spawn an
# editor so the user can add one in...
if manual and not reject_message:
- temp_filename = utils.temp_filename()
+ (fd, temp_filename) = utils.temp_filename()
editor = os.environ.get("EDITOR","vi")
answer = 'E'
while answer == 'E':
# for example, the package was in potato but had an -sa
# upload in woody. So we need to choose the right one.
- x = ql[0]; # default to something sane in case we don't match any or have only one
+ # default to something sane in case we don't match any or have only one
+ x = ql[0]
if len(ql) > 1:
for i in ql:
actual_size = os.stat(old_file)[stat.ST_SIZE]
found = old_file
suite_type = x[2]
- dsc_files[dsc_file]["files id"] = x[3]; # need this for updating dsc_files in install()
+ # need this for updating dsc_files in install()
+ dsc_files[dsc_file]["files id"] = x[3]
# See install() in process-accepted...
self.pkg.orig_tar_id = x[3]
self.pkg.orig_tar_gz = old_file
sys, tempfile, traceback, stat
import apt_pkg
import database
+import time
from dak_exceptions import *
################################################################################
field = 'checksums-%s' % hashname
if not field in manifest:
return rejmsg
- input = manifest[field]
- for line in input.split('\n'):
+ for line in manifest[field].split('\n'):
if not line:
break
- hash, size, file = line.strip().split(' ')
- if not files.has_key(file):
+ checksum, size, checkfile = line.strip().split(' ')
+ if not files.has_key(checkfile):
# TODO: check for the file's entry in the original files dict, not
# the one modified by (auto)byhand and other weird stuff
# rejmsg.append("%s: not present in files but in checksums-%s in %s" %
# (file, hashname, where))
continue
- if not files[file]["size"] == size:
+ if not files[checkfile]["size"] == size:
rejmsg.append("%s: size differs for files and checksums-%s entry "\
- "in %s" % (file, hashname, where))
+ "in %s" % (checkfile, hashname, where))
continue
- files[file][hash_key(hashname)] = hash
+ files[checkfile][hash_key(hashname)] = checksum
for f in files.keys():
if not files[f].has_key(hash_key(hashname)):
- rejmsg.append("%s: no entry in checksums-%s in %s" % (file,
+ rejmsg.append("%s: no entry in checksums-%s in %s" % (checkfile,
hashname, where))
return rejmsg
def send_mail (message, filename=""):
# If we've been passed a string dump it into a temporary file
if message:
- filename = tempfile.mktemp()
- fd = os.open(filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, 0700)
+ (fd, filename) = tempfile.mkstemp()
os.write (fd, message)
os.close (fd)
# Perform a substition of template
def TemplateSubst(map, filename):
- file = open_file(filename)
- template = file.read()
+ templatefile = open_file(filename)
+ template = templatefile.read()
for x in map.keys():
template = template.replace(x,map[x])
- file.close()
+ templatefile.close()
return template
################################################################################
################################################################################
def result_join (original, sep = '\t'):
- list = []
+ resultlist = []
for i in xrange(len(original)):
if original[i] == None:
- list.append("")
+ resultlist.append("")
else:
- list.append(original[i])
- return sep.join(list)
+ resultlist.append(original[i])
+ return sep.join(resultlist)
################################################################################
return "%s: tainted filename" % (filename)
# Invoke gpgv on the file
- status_read, status_write = os.pipe();
+ status_read, status_write = os.pipe()
cmd = "gpgv --status-fd %s --keyring /dev/null %s" % (status_write, filename)
(_, status, _) = gpgv_get_status_output(cmd, status_read, status_write)
return None
# Build the command line
- status_read, status_write = os.pipe();
+ status_read, status_write = os.pipe()
cmd = "gpgv --status-fd %s %s %s %s" % (
status_write, gpg_keyring_args(keyrings), sig_filename, data_filename)
if keywords.has_key("NODATA"):
reject("no signature found in %s." % (sig_filename))
bad = 1
+ if keywords.has_key("EXPKEYSIG"):
+ args = keywords["EXPKEYSIG"]
+ if len(args) >= 1:
+ key = args[0]
+ reject("Signature made by expired key 0x%s" % (key))
+ bad = 1
if keywords.has_key("KEYEXPIRED") and not keywords.has_key("GOODSIG"):
args = keywords["KEYEXPIRED"]
+ expiredate=""
if len(args) >= 1:
- key = args[0]
- reject("The key (0x%s) used to sign %s has expired." % (key, sig_filename))
+ timestamp = args[0]
+ if timestamp.count("T") == 0:
+ expiredate = time.strftime("%Y-%m-%d", time.gmtime(timestamp))
+ else:
+ expiredate = timestamp
+ reject("The key used to sign %s has expired on %s" % (sig_filename, expiredate))
bad = 1
if bad:
################################################################################
-def temp_filename(directory=None, dotprefix=None, perms=0700):
+def temp_filename(directory=None, prefix="dak", suffix=""):
"""Return a secure and unique filename by pre-creating it.
If 'directory' is non-null, it will be the directory the file is pre-created in.
-If 'dotprefix' is non-null, the filename will be prefixed with a '.'."""
+If 'prefix' is non-null, the filename will be prefixed with it; the default is "dak".
+If 'suffix' is non-null, the filename will end with it.
- if directory:
- old_tempdir = tempfile.tempdir
- tempfile.tempdir = directory
-
- filename = tempfile.mktemp()
-
- if dotprefix:
- filename = "%s/.%s" % (os.path.dirname(filename), os.path.basename(filename))
- fd = os.open(filename, os.O_RDWR|os.O_CREAT|os.O_EXCL, perms)
- os.close(fd)
-
- if directory:
- tempfile.tempdir = old_tempdir
+Returns a pair (fd, name).
+"""
- return filename
+ return tempfile.mkstemp(suffix, prefix, directory)
################################################################################
psql --html projectb <<EOF
SELECT uid.uid, uid.name, f.fingerprint
FROM uid LEFT OUTER JOIN fingerprint f ON (uid.id = f.uid)
- WHERE uid.uid LIKE 'dm:%'
+ WHERE uid.uid LIKE '%@%'
ORDER BY uid.uid;
EOF
JOIN uid u ON
(m.name LIKE u.name || ' <%>' OR
m.name LIKE '% <' || substring(u.uid FROM 4) || '>')
- WHERE u.uid LIKE 'dm:%' AND sa.suite = 5
+ WHERE u.uid LIKE '%@%' AND sa.suite = 5
)
ORDER BY uid.uid;
EOF
echo "Packages debian maintainers may update:"
psql --html projectb <<EOF
- SELECT s.source, s.version, u.uid
+ SELECT s.source, space_separated_list(s.version), u.uid
FROM src_uploaders su JOIN source s ON (su.source = s.id)
JOIN src_associations sa ON (s.id = sa.source)
JOIN maintainer m ON (su.maintainer = m.id)
JOIN uid u ON (m.name LIKE u.name || ' <%>' OR
- m.name LIKE '% <' || substring(u.uid FROM 4) || '>')
- WHERE u.uid LIKE 'dm:%' AND sa.suite = 5
+ m.name LIKE '% <' || substring(u.uid FROM 4) || '>')
+ WHERE s.dm_upload_allowed = 't' GROUP BY s.source, s.version, u.uid
ORDER BY u.uid, s.source, s.version;
EOF
SELECT s.source, s.version, s.install_date, u.uid
FROM source s JOIN fingerprint f ON (s.sig_fpr = f.id)
JOIN uid u ON (f.uid = u.id)
- WHERE u.uid LIKE 'dm:%'
+ WHERE u.uid LIKE '%@%'
ORDER BY u.uid, s.source, s.version;
EOF
FROM binaries b JOIN architecture a ON (b.architecture = a.id)
JOIN fingerprint f ON (b.sig_fpr = f.id)
JOIN uid u ON (f.uid = u.id)
- WHERE u.uid LIKE 'dm:%'
+ WHERE u.uid LIKE '%@%'
ORDER BY u.uid, b.package, b.version;
EOF
--- /dev/null
+#!/bin/bash
+
+set -e
+set -u
+
+FTPDIR="/org/upload.debian.org/ftp/pub/UploadQueue/"
+SSHDIR="/org/upload.debian.org/UploadQueue/"
+
+yes n | find ${FTPDIR} -type f -mmin +15 -print0 -exec mv -i --target-directory=${SSHDIR} "{}" +
+++ /dev/null
-#!/usr/bin/make -f
-
-CXXFLAGS = -I/usr/include/postgresql/ -I`pg_config --includedir-server` -fPIC -Wall
-CFLAGS = -fFIC -Wall `pg_config --cflags`
-LDFLAGS = `pg_config --ldflags`
-LIBS = -lapt-pkg `pg_config --libs`
-
-C++ = g++
-
-all: sql-aptvc.so
-
-sql-aptvc.o: sql-aptvc.cpp
-sql-aptvc.so: sql-aptvc.o
- $(CC) $(LDFLAGS) $(LIBS) -shared -o $@ $<
-clean:
- rm -f sql-aptvc.so sql-aptvc.o
-
+++ /dev/null
-/* Wrapper round apt's version compare functions for PostgreSQL. */
-/* Copyright (C) 2001, James Troup <james@nocrew.org> */
-
-/* This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/* NB: do not try to use the VERSION-1 calling conventions for
- C-Language functions; it works on i386 but segfaults the postgres
- child backend on Sparc. */
-
-#include <apt-pkg/debversion.h>
-
-extern "C"
-{
-
-#include <postgres.h>
-#include <fmgr.h>
-
-#ifdef PG_MODULE_MAGIC
-PG_MODULE_MAGIC;
-#endif
-
- int versioncmp(text *A, text *B);
-
- int
- versioncmp (text *A, text *B)
- {
- int result, txt_size;
- char *a, *b;
-
- txt_size = VARSIZE(A)-VARHDRSZ;
- a = (char *) palloc(txt_size+1);
- memcpy(a, VARDATA(A), txt_size);
- a[txt_size] = '\0';
-
- txt_size = VARSIZE(B)-VARHDRSZ;
- b = (char *) palloc(txt_size+1);
- memcpy(b, VARDATA(B), txt_size);
- b[txt_size] = '\0';
-
- result = debVS.CmpVersion (a, b);
-
- pfree (a);
- pfree (b);
-
- return (result);
- }
-
-}
Thank you for reporting the bug, which will now be closed. If you
have further comments please address them to __BUG_NUMBER__@__BUG_SERVER__.
+The full log for this bug can be viewed at http://__BUG_SERVER__/__BUG_NUMBER__
+
This message was generated automatically; if you believe that there is
a problem with it please contact the archive administrators by mailing
__ADMIN_ADDRESS__.
<font size="-2">Made by Eduard Bloch <blade@debian.org>
<br>Small update to use 12h dinstall by Felipe Augusto van de Wiel (faw)
<br>Small update to use 6h dinstall by Mike O'Connor (stew)
+<br>Please see this <a href="http://lists.debian.org/debian-project/2008/12/msg00114.html">announcement</a> about dinstall running every 6 hours.
</BODY>