from inspect import getargspec
import sqlalchemy
-from sqlalchemy import create_engine, Table, MetaData, Column, Integer
-from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, backref
+from sqlalchemy import create_engine, Table, MetaData, Column, Integer, desc, \
+ Text, ForeignKey
+from sqlalchemy.orm import sessionmaker, mapper, relation, object_session, \
+ backref, MapperExtension, EXT_CONTINUE, object_mapper
from sqlalchemy import types as sqltypes
# Don't remove this, we re-export the exceptions to scripts which import us
# in the database
from config import Config
from textutils import fix_maintainer
-from dak_exceptions import NoSourceFieldError
+from dak_exceptions import DBUpdateError, NoSourceFieldError
# suppress some deprecation warnings in squeeze related to sqlalchemy
import warnings
class ORMObject(object):
"""
    ORMObject is a base class for all ORM classes mapped by SQLAlchemy. All
- derived classes must implement the summary() method.
+ derived classes must implement the properties() method.
"""
def properties(self):
for property in all_properties:
# check for list or query
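+ # A property name ending in '_count' is derived: the reported value is
+ # the size of the underlying collection, e.g. 'suites_count' is taken
+ # from self.suites.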
if property[-6:] == '_count':
- value = getattr(self, property[:-6])
+ real_property = property[:-6]
+ if not hasattr(self, real_property):
+ continue
+ value = getattr(self, real_property)
 if hasattr(value, '__len__'):
 # list
 value = len(value)
+ elif hasattr(value, 'count'):
+ # query, e.g. a lazy='dynamic' relation or backref
+ value = value.count()
 else:
 raise KeyError('Do not understand property %s.' % property)
else:
+ if not hasattr(self, property):
+ continue
# plain object
value = getattr(self, property)
if value is None:
# skip None
- pass
+ continue
elif isinstance(value, ORMObject):
# use repr() for ORMObject types
value = repr(value)
else:
# we want a string for all other types because json cannot
- # everything
+ # encode everything
value = str(value)
data[property] = value
return json.dumps(data)
'''
return '<%s %s>' % (self.classname(), self.json())
+ def not_null_constraints(self):
+ '''
+ Returns a list of properties that must be not NULL. Derived classes
+ should override this method if needed.
+ '''
+ return []
+
+ validation_message = \
+ "Validation failed because property '%s' must not be empty in object\n%s"
+
+ def validate(self):
+ '''
+ This function validates the not NULL constraints as returned by
+ not_null_constraints(). It raises the DBUpdateError exception if
+ validation fails.
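+
+ For example, given the Uid class below, which lists 'uid' in its
+ not_null_constraints(), calling
+
+ Uid().validate()
+
+ raises DBUpdateError because the 'uid' property of a fresh Uid object
+ is still None.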
+ '''
+ for property in self.not_null_constraints():
+ # TODO: It is a bit awkward that the mapper configuration allows
+ # directly setting the numeric _id columns. We should get rid of it
+ # in the long run.
+ if hasattr(self, property + '_id') and \
+ getattr(self, property + '_id') is not None:
+ continue
+ if not hasattr(self, property) or getattr(self, property) is None:
+ raise DBUpdateError(self.validation_message % \
+ (property, str(self)))
+
+ @classmethod
+ @session_wrapper
+ def get(cls, primary_key, session = None):
+ '''
+ This is a support function that allows getting an object by its primary
+ key.
+
+ Architecture.get(3[, session])
+
+ instead of the more verbose
+
+ session.query(Architecture).get(3)
+ '''
+ return session.query(cls).get(primary_key)
+
+ def session(self, replace = False):
+ '''
+ Returns the current session that is associated with the object. May
+ return None if the object is in detached state.
+ '''
+
+ return object_session(self)
+
+ def clone(self, session = None):
+ '''
+ Clones the current object in a new session and returns the new clone. A
+ fresh session is created if the optional session parameter is not
+ provided. The function will fail if a session is provided and has
+ unflushed changes.
+
+ RATIONALE: SQLAlchemy's session is not thread safe. This method clones
+ an existing object to allow several threads to work with their own
+ instances of an ORMObject.
+
+ WARNING: Only persistent (committed) objects can be cloned. Changes
+ made to the original object that are not committed yet will be lost.
+ The session of the new object will always be rolled back to avoid
+ resource leaks.
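+
+ A usage sketch, assuming 'arch' is a committed Architecture object:
+
+ arch_copy = arch.clone()
+
+ 'arch_copy' is attached to a fresh session and can safely be used by
+ another thread.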
+ '''
+
+ if self.session() is None:
+ raise RuntimeError( \
+ 'Method clone() failed for detached object:\n%s' % self)
+ self.session().flush()
+ mapper = object_mapper(self)
+ primary_key = mapper.primary_key_from_instance(self)
+ object_class = self.__class__
+ if session is None:
+ session = DBConn().session()
+ elif len(session.new) + len(session.dirty) + len(session.deleted) > 0:
+ raise RuntimeError( \
+ 'Method clone() failed due to unflushed changes in session.')
+ new_object = session.query(object_class).get(primary_key)
+ session.rollback()
+ if new_object is None:
+ raise RuntimeError( \
+ 'Method clone() failed for non-persistent object:\n%s' % self)
+ return new_object
+
__all__.append('ORMObject')
################################################################################
+class Validator(MapperExtension):
+ '''
+ This class calls the validate() method for each instance for the
+ 'before_update' and 'before_insert' events. A global object validator is
+ used for configuring the individual mappers.
+ '''
+
+ def before_update(self, mapper, connection, instance):
+ instance.validate()
+ return EXT_CONTINUE
+
+ def before_insert(self, mapper, connection, instance):
+ instance.validate()
+ return EXT_CONTINUE
+
+validator = Validator()
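+# Each mapper configured in DBConn.__setupmappers() below passes this
+# instance via 'extension = validator', so validate() runs right before
+# INSERT and UPDATE statements are issued for the mapped objects.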
+
+################################################################################
+
class Architecture(ORMObject):
def __init__(self, arch_string = None, description = None):
self.arch_string = arch_string
def properties(self):
return ['arch_string', 'arch_id', 'suites_count']
+ def not_null_constraints(self):
+ return ['arch_string']
+
__all__.append('Architecture')
@session_wrapper
################################################################################
-class BinAssociation(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<BinAssociation %s (%s, %s)>' % (self.ba_id, self.binary, self.suite)
+class BinContents(ORMObject):
+ def properties(self):
+ return ['file', 'binary']
-__all__.append('BinAssociation')
+__all__.append('BinContents')
################################################################################
-class BinContents(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def __repr__(self):
- return '<BinContents (%s, %s)>' % (self.binary, self.filename)
-
-__all__.append('BinContents')
+class DBBinary(ORMObject):
+ def __init__(self, package = None, source = None, version = None, \
+ maintainer = None, architecture = None, poolfile = None, \
+ binarytype = 'deb'):
+ self.package = package
+ self.source = source
+ self.version = version
+ self.maintainer = maintainer
+ self.architecture = architecture
+ self.poolfile = poolfile
+ self.binarytype = binarytype
-################################################################################
+ def properties(self):
+ return ['package', 'version', 'maintainer', 'source', 'architecture', \
+ 'poolfile', 'binarytype', 'fingerprint', 'install_date', \
+ 'suites_count', 'binary_id', 'contents_count']
-class DBBinary(object):
- def __init__(self, *args, **kwargs):
- pass
+ def not_null_constraints(self):
+ return ['package', 'version', 'maintainer', 'source', 'poolfile', \
+ 'binarytype']
- def __repr__(self):
- return '<DBBinary %s (%s, %s)>' % (self.package, self.version, self.architecture)
+ def get_component_name(self):
+ return self.poolfile.location.component.component_name
__all__.append('DBBinary')
@return: list of Suite objects for the given package
"""
- return session.query(Suite).join(BinAssociation).join(DBBinary).filter_by(package=package).all()
+ return session.query(Suite).filter(Suite.binaries.any(DBBinary.package == package)).all()
__all__.append('get_suites_binary_in')
@session_wrapper
-def get_binary_from_id(binary_id, session=None):
- """
- Returns DBBinary object for given C{id}
-
- @type binary_id: int
- @param binary_id: Id of the required binary
-
- @type session: Session
- @param session: Optional SQLA session object (a temporary one will be
- generated if not supplied)
-
- @rtype: DBBinary
- @return: DBBinary object for the given binary (None if not present)
- """
-
- q = session.query(DBBinary).filter_by(binary_id=binary_id)
-
- try:
- return q.one()
- except NoResultFound:
- return None
-
-__all__.append('get_binary_from_id')
-
-@session_wrapper
-def get_binaries_from_name(package, version=None, architecture=None, session=None):
- """
- Returns list of DBBinary objects for given C{package} name
+def get_component_by_package_suite(package, suite_list, arch_list=[], session=None):
+ '''
+ Returns the component name of the newest binary package in suite_list or
+ None if no package is found. The result can be optionally filtered by a list
+ of architecture names.
@type package: str
@param package: DBBinary package name to search for
- @type version: str or None
- @param version: Version to search for (or None)
-
- @type architecture: str, list or None
- @param architecture: Architectures to limit to (or None if no limit)
-
- @type session: Session
- @param session: Optional SQL session object (a temporary one will be
- generated if not supplied)
-
- @rtype: list
- @return: list of DBBinary objects for the given name (may be empty)
- """
-
- q = session.query(DBBinary).filter_by(package=package)
-
- if version is not None:
- q = q.filter_by(version=version)
+ @type suite_list: list of str
+ @param suite_list: list of suite_name items
- if architecture is not None:
- if not isinstance(architecture, list):
- architecture = [architecture]
- q = q.join(Architecture).filter(Architecture.arch_string.in_(architecture))
+ @type arch_list: list of str
+ @param arch_list: optional list of arch_string items that defaults to []
- ret = q.all()
+ @rtype: str or NoneType
+ @return: name of component or None
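+
+ Example, with purely illustrative package and suite names:
+
+ get_component_by_package_suite('hello', ['unstable'])
+
+ returns the component name of the newest 'hello' binary in unstable
+ (e.g. 'main') or None if the package is not found.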
+ '''
- return ret
-
-__all__.append('get_binaries_from_name')
-
-@session_wrapper
-def get_binaries_from_source_id(source_id, session=None):
- """
- Returns list of DBBinary objects for given C{source_id}
-
- @type source_id: int
- @param source_id: source_id to search for
-
- @type session: Session
- @param session: Optional SQL session object (a temporary one will be
- generated if not supplied)
-
- @rtype: list
- @return: list of DBBinary objects for the given name (may be empty)
- """
-
- return session.query(DBBinary).filter_by(source_id=source_id).all()
-
-__all__.append('get_binaries_from_source_id')
-
-@session_wrapper
-def get_binary_from_name_suite(package, suitename, session=None):
- ### For dak examine-package
- ### XXX: Doesn't use object API yet
-
- sql = """SELECT DISTINCT(b.package), b.version, c.name, su.suite_name
- FROM binaries b, files fi, location l, component c, bin_associations ba, suite su
- WHERE b.package='%(package)s'
- AND b.file = fi.id
- AND fi.location = l.id
- AND l.component = c.id
- AND ba.bin=b.id
- AND ba.suite = su.id
- AND su.suite_name %(suitename)s
- ORDER BY b.version DESC"""
-
- return session.execute(sql % {'package': package, 'suitename': suitename})
-
-__all__.append('get_binary_from_name_suite')
-
-@session_wrapper
-def get_binary_components(package, suitename, arch, session=None):
- # Check for packages that have moved from one component to another
- query = """SELECT c.name FROM binaries b, bin_associations ba, suite s, location l, component c, architecture a, files f
- WHERE b.package=:package AND s.suite_name=:suitename
- AND (a.arch_string = :arch OR a.arch_string = 'all')
- AND ba.bin = b.id AND ba.suite = s.id AND b.architecture = a.id
- AND f.location = l.id
- AND l.component = c.id
- AND b.file = f.id"""
-
- vals = {'package': package, 'suitename': suitename, 'arch': arch}
-
- return session.execute(query, vals)
+ q = session.query(DBBinary).filter_by(package = package). \
+ join(DBBinary.suites).filter(Suite.suite_name.in_(suite_list))
+ if len(arch_list) > 0:
+ q = q.join(DBBinary.architecture). \
+ filter(Architecture.arch_string.in_(arch_list))
+ binary = q.order_by(desc(DBBinary.version)).first()
+ if binary is None:
+ return None
+ else:
+ return binary.get_component_name()
-__all__.append('get_binary_components')
+__all__.append('get_component_by_package_suite')
################################################################################
################################################################################
-class Component(object):
- def __init__(self, *args, **kwargs):
- pass
+class Component(ORMObject):
+ def __init__(self, component_name = None):
+ self.component_name = component_name
def __eq__(self, val):
if isinstance(val, str):
# This signals to use the normal comparison operator
return NotImplemented
- def __repr__(self):
- return '<Component %s>' % self.component_name
+ def properties(self):
+ return ['component_name', 'component_id', 'description', 'location', \
+ 'meets_dfsg']
+
+ def not_null_constraints(self):
+ return ['component_name']
__all__.append('Component')
def fullpath(self):
return os.path.join(self.location.path, self.filename)
- def is_valid(self, filesize = -1, md5sum = None):\
- return self.filesize == filesize and self.md5sum == md5sum
+ def is_valid(self, filesize = -1, md5sum = None):
+ return self.filesize == long(filesize) and self.md5sum == md5sum
def properties(self):
return ['filename', 'file_id', 'filesize', 'md5sum', 'sha1sum', \
- 'sha256sum', 'location', 'source', 'last_used']
+ 'sha256sum', 'location', 'source', 'binary', 'last_used']
+
+ def not_null_constraints(self):
+ return ['filename', 'md5sum', 'location']
__all__.append('PoolFile')
################################################################################
-class Fingerprint(object):
+class Fingerprint(ORMObject):
def __init__(self, fingerprint = None):
self.fingerprint = fingerprint
- def __repr__(self):
- return '<Fingerprint %s>' % self.fingerprint
+ def properties(self):
+ return ['fingerprint', 'fingerprint_id', 'keyring', 'uid', \
+ 'binary_reject']
+
+ def not_null_constraints(self):
+ return ['fingerprint']
__all__.append('Fingerprint')
################################################################################
-class Location(object):
- def __init__(self, path = None):
+class Location(ORMObject):
+ def __init__(self, path = None, component = None):
self.path = path
+ self.component = component
# the column 'type' should go away, see comment at mapper
self.archive_type = 'pool'
- def __repr__(self):
- return '<Location %s (%s)>' % (self.path, self.location_id)
+ def properties(self):
+ return ['path', 'location_id', 'archive_type', 'component', \
+ 'files_count']
+
+ def not_null_constraints(self):
+ return ['path', 'archive_type']
__all__.append('Location')
################################################################################
-class Maintainer(object):
+class Maintainer(ORMObject):
def __init__(self, name = None):
self.name = name
- def __repr__(self):
- return '''<Maintainer '%s' (%s)>''' % (self.name, self.maintainer_id)
+ def properties(self):
+ return ['name', 'maintainer_id']
+
+ def not_null_constraints(self):
+ return ['name']
def get_split_maintainer(self):
if not hasattr(self, 'name') or self.name is None:
################################################################################
-class DBSource(object):
+class DBSource(ORMObject):
def __init__(self, source = None, version = None, maintainer = None, \
changedby = None, poolfile = None, install_date = None):
self.source = source
self.poolfile = poolfile
self.install_date = install_date
- def __repr__(self):
- return '<DBSource %s (%s)>' % (self.source, self.version)
+ def properties(self):
+ return ['source', 'source_id', 'maintainer', 'changedby', \
+ 'fingerprint', 'poolfile', 'version', 'suites_count', \
+ 'install_date', 'binaries_count']
+
+ def not_null_constraints(self):
+ return ['source', 'version', 'install_date', 'maintainer', \
+ 'changedby', 'poolfile']
__all__.append('DBSource')
# Add and flush object so it has an ID
session.add(bin)
- session.flush()
- # Add BinAssociations
- for suite_name in u.pkg.changes["distribution"].keys():
- ba = BinAssociation()
- ba.binary_id = bin.binary_id
- ba.suite_id = get_suite(suite_name).suite_id
- session.add(ba)
+ suite_names = u.pkg.changes["distribution"].keys()
+ bin.suites = session.query(Suite). \
+ filter(Suite.suite_name.in_(suite_names)).all()
session.flush()
# Why the heck don't we have any UNIQUE constraints in table suite?
# TODO: Add UNIQUE constraints for appropriate columns.
-class Suite(object):
+class Suite(ORMObject):
def __init__(self, suite_name = None, version = None):
self.suite_name = suite_name
self.version = version
- def __repr__(self):
- return '<Suite %s>' % self.suite_name
+ def properties(self):
+ return ['suite_name', 'version', 'sources_count', 'binaries_count']
+
+ def not_null_constraints(self):
+ return ['suite_name', 'version']
def __eq__(self, val):
if isinstance(val, str):
################################################################################
-class Uid(object):
+class Uid(ORMObject):
def __init__(self, uid = None, name = None):
self.uid = uid
self.name = name
# This signals to use the normal comparison operator
return NotImplemented
- def __repr__(self):
- return '<Uid %s (%s)>' % (self.uid, self.name)
+ def properties(self):
+ return ['uid', 'name', 'fingerprint']
+
+ def not_null_constraints(self):
+ return ['uid']
__all__.append('Uid')
# The following tables have primary keys but sqlalchemy
# version 0.5 fails to reflect them correctly with database
# versions before upgrade #41.
- #'changes',
- #'build_queue_files',
+ 'changes',
+ 'build_queue_files',
)
tables_no_primary = (
- 'bin_contents',
'changes_pending_files_map',
'changes_pending_source_files',
'changes_pool_files',
'suite_build_queue_copy',
'udeb_contents',
# see the comment above
- 'changes',
- 'build_queue_files',
+ #'changes',
+ #'build_queue_files',
)
views = (
table = Table(table_name, self.db_meta, autoload=True)
setattr(self, 'tbl_%s' % table_name, table)
+ # bin_contents needs special attention until update #41 has been
+ # applied
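+ # The explicit Column definitions supply the composite primary key that
+ # reflection cannot detect yet; useexisting=True lets this definition
+ # reuse a table object of the same name that may already be present in
+ # the metadata.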
+ self.tbl_bin_contents = Table('bin_contents', self.db_meta, \
+ Column('file', Text, primary_key = True),
+ Column('binary_id', Integer, ForeignKey('binaries.id'), \
+ primary_key = True),
+ autoload=True, useexisting=True)
+
for view_name in views:
view = Table(view_name, self.db_meta, autoload=True)
setattr(self, 'view_%s' % view_name, view)
def __setupmappers(self):
mapper(Architecture, self.tbl_architecture,
- properties = dict(arch_id = self.tbl_architecture.c.id,
+ properties = dict(arch_id = self.tbl_architecture.c.id,
suites = relation(Suite, secondary=self.tbl_suite_architectures,
order_by='suite_name',
- backref=backref('architectures', order_by='arch_string'))))
+ backref=backref('architectures', order_by='arch_string'))),
+ extension = validator)
mapper(Archive, self.tbl_archive,
properties = dict(archive_id = self.tbl_archive.c.id,
archive_name = self.tbl_archive.c.name))
- mapper(BinAssociation, self.tbl_bin_associations,
- properties = dict(ba_id = self.tbl_bin_associations.c.id,
- suite_id = self.tbl_bin_associations.c.suite,
- suite = relation(Suite),
- binary_id = self.tbl_bin_associations.c.bin,
- binary = relation(DBBinary)))
-
mapper(PendingBinContents, self.tbl_pending_bin_contents,
properties = dict(contents_id =self.tbl_pending_bin_contents.c.id,
filename = self.tbl_pending_bin_contents.c.filename,
maintainer_id = self.tbl_binaries.c.maintainer,
maintainer = relation(Maintainer),
source_id = self.tbl_binaries.c.source,
- source = relation(DBSource),
+ source = relation(DBSource, backref='binaries'),
arch_id = self.tbl_binaries.c.architecture,
architecture = relation(Architecture),
poolfile_id = self.tbl_binaries.c.file,
- poolfile = relation(PoolFile),
+ poolfile = relation(PoolFile, backref=backref('binary', uselist = False)),
binarytype = self.tbl_binaries.c.type,
fingerprint_id = self.tbl_binaries.c.sig_fpr,
fingerprint = relation(Fingerprint),
install_date = self.tbl_binaries.c.install_date,
- binassociations = relation(BinAssociation,
- primaryjoin=(self.tbl_binaries.c.id==self.tbl_bin_associations.c.bin))))
+ suites = relation(Suite, secondary=self.tbl_bin_associations,
+ backref=backref('binaries', lazy='dynamic'))),
+ extension = validator)
mapper(BinaryACL, self.tbl_binary_acl,
properties = dict(binary_acl_id = self.tbl_binary_acl.c.id))
mapper(Component, self.tbl_component,
properties = dict(component_id = self.tbl_component.c.id,
- component_name = self.tbl_component.c.name))
+ component_name = self.tbl_component.c.name),
+ extension = validator)
mapper(DBConfig, self.tbl_config,
properties = dict(config_id = self.tbl_config.c.id))
# using lazy='dynamic' in the back
# reference because we have A LOT of
# files in one location
- backref=backref('files', lazy='dynamic'))))
+ backref=backref('files', lazy='dynamic'))),
+ extension = validator)
mapper(Fingerprint, self.tbl_fingerprint,
properties = dict(fingerprint_id = self.tbl_fingerprint.c.id,
keyring_id = self.tbl_fingerprint.c.keyring,
keyring = relation(Keyring),
source_acl = relation(SourceACL),
- binary_acl = relation(BinaryACL)))
+ binary_acl = relation(BinaryACL)),
+ extension = validator)
mapper(Keyring, self.tbl_keyrings,
properties = dict(keyring_name = self.tbl_keyrings.c.name,
mapper(Location, self.tbl_location,
properties = dict(location_id = self.tbl_location.c.id,
component_id = self.tbl_location.c.component,
- component = relation(Component),
+ component = relation(Component, \
+ backref=backref('location', uselist = False)),
archive_id = self.tbl_location.c.archive,
archive = relation(Archive),
# FIXME: the 'type' column is old cruft and
# should be removed in the future.
- archive_type = self.tbl_location.c.type))
+ archive_type = self.tbl_location.c.type),
+ extension = validator)
mapper(Maintainer, self.tbl_maintainer,
properties = dict(maintainer_id = self.tbl_maintainer.c.id,
maintains_sources = relation(DBSource, backref='maintainer',
primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.maintainer)),
changed_sources = relation(DBSource, backref='changedby',
- primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby))))
+ primaryjoin=(self.tbl_maintainer.c.id==self.tbl_source.c.changedby))),
+ extension = validator)
mapper(NewComment, self.tbl_new_comments,
properties = dict(comment_id = self.tbl_new_comments.c.id))
srcfiles = relation(DSCFile,
primaryjoin=(self.tbl_source.c.id==self.tbl_dsc_files.c.source)),
suites = relation(Suite, secondary=self.tbl_src_associations,
- backref='sources'),
- srcuploaders = relation(SrcUploader)))
+ backref=backref('sources', lazy='dynamic')),
+ srcuploaders = relation(SrcUploader)),
+ extension = validator)
mapper(SourceACL, self.tbl_source_acl,
properties = dict(source_acl_id = self.tbl_source_acl.c.id))
mapper(Suite, self.tbl_suite,
properties = dict(suite_id = self.tbl_suite.c.id,
policy_queue = relation(PolicyQueue),
- copy_queues = relation(BuildQueue, secondary=self.tbl_suite_build_queue_copy)))
+ copy_queues = relation(BuildQueue,
+ secondary=self.tbl_suite_build_queue_copy)),
+ extension = validator)
mapper(SuiteSrcFormat, self.tbl_suite_src_formats,
properties = dict(suite_id = self.tbl_suite_src_formats.c.suite,
mapper(Uid, self.tbl_uid,
properties = dict(uid_id = self.tbl_uid.c.id,
- fingerprint = relation(Fingerprint)))
+ fingerprint = relation(Fingerprint)),
+ extension = validator)
mapper(UploadBlock, self.tbl_upload_blocks,
properties = dict(upload_block_id = self.tbl_upload_blocks.c.id,
fingerprint = relation(Fingerprint, backref="uploadblocks"),
uid = relation(Uid, backref="uploadblocks")))
+ mapper(BinContents, self.tbl_bin_contents,
+ properties = dict(
+ binary = relation(DBBinary,
+ backref=backref('contents', lazy='dynamic')),
+ file = self.tbl_bin_contents.c.file))
+
## Connection functions
def __createconn(self):
from config import Config