New Research: Supply Chain Attack on Axios Pulls Malicious Dependency from npm.Details →
Socket
Book a DemoSign in
Socket

orb-api

Package Overview
Dependencies
Maintainers
4
Versions
99
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

orb-api - pypi Package Compare versions

Comparing version
2016.1.45
to
2016.0.252
+7
orb/caching/__init__.py
"""
Defines caching methods to use when working with tables.
"""
from .datacache import DataCache
from .recordcache import RecordCache
from .tablecache import TableCache
__recurse__ = False
__toc__ = [r'orb.caching.backends.basiccache',
r'orb.caching.backends.rediscache']
import datetime
from projex.lazymodule import lazy_import
from orb.caching.datacache import DataCache
from projex.locks import ReadLocker, ReadWriteLock, WriteLocker
orb = lazy_import('orb')
# noinspection PyAbstractClass
class BasicCache(DataCache):
    """ In-memory caching backend storing values in thread-safe dicts. """
    def __init__(self, timeout=0):
        super(BasicCache, self).__init__(timeout)

        # define custom properties
        self._cacheLock = ReadWriteLock()
        self._cache = {}
        self._expiresAt = {}

    def _cleanup(self):
        """
        Cleans up any expired keys from the cache.
        """
        now = datetime.datetime.now()
        with WriteLocker(self._cacheLock):
            # snapshot the items so the dicts can be mutated while iterating
            for key, expires in list(self._expiresAt.items()):
                if expires < now:
                    self._expiresAt.pop(key, None)
                    self._cache.pop(key, None)

    def expire(self, key=None):
        """
        Expires the given key from the local cache.

        :param key | <hashable> || None | clears the whole cache when None
        """
        with WriteLocker(self._cacheLock):
            if key:
                self._cache.pop(key, None)
                self._expiresAt.pop(key, None)
            else:
                self._cache.clear()
                self._expiresAt.clear()

    def isCached(self, key):
        """
        Returns whether or not the inputted key is cached.

        :param key | <hashable>

        :return <bool>
        """
        if not self.isEnabled():
            return False

        self._cleanup()
        with ReadLocker(self._cacheLock):
            return key in self._cache

    def setValue(self, key, value, timeout=None):
        """
        Caches the inputted key and value to this instance.

        :param key     | <hashable>
               value   | <variant>
               timeout | <int> || None | seconds, defaults to self.timeout()
        """
        if not self.isEnabled():
            return

        timeout = timeout or self.timeout()

        with WriteLocker(self._cacheLock):
            self._cache[key] = value
            self._expiresAt[key] = datetime.datetime.now() + datetime.timedelta(seconds=timeout)

    def value(self, key, default=None):
        """
        Returns the value for the cached key for this instance.

        :param key     | <hashable>
               default | <variant> | returned when the key is missing or expired

        :return <variant>
        """
        # bug fix: honor the `default` parameter (it was previously ignored)
        # and purge expired entries first so stale values are never returned
        self._cleanup()
        with ReadLocker(self._cacheLock):
            return self._cache.get(key, default)
# create a basic cache object
DataCache.registerAddon('Basic', BasicCache)
import cPickle
from projex.lazymodule import lazy_import
from orb.caching.datacache import DataCache
redis = lazy_import('redis')
orb = lazy_import('orb')
# noinspection PyAbstractClass
class RedisCache(DataCache):
    """ Caching backend that stores pickled values in a redis server. """
    def __init__(self, host='localhost', port=6379, timeout=0):
        super(RedisCache, self).__init__(timeout)

        # define custom properties
        self._client = redis.StrictRedis(host=host, port=port)

    @staticmethod
    def __key(key):
        # namespace every key so expire() can match all ORB entries via
        # the 'ORB(*)' glob pattern
        return 'ORB({0})'.format(key or '*')

    def expire(self, key=None):
        """
        Expires out all the ORB related keys from the redis cache.

        :param key | <hashable> || None | clears every ORB key when None
        """
        if key:
            # bug fix: the key was previously namespaced twice
            # (delete(self.__key(self.__key(key)))), so the delete
            # never matched the stored entry
            self._client.delete(self.__key(key))
        else:
            # remove every ORB namespaced key in one pipeline round-trip
            keys = self._client.keys(self.__key(None))
            with self._client.pipeline() as pipe:
                pipe.multi()
                for k in keys:
                    pipe.delete(k)
                pipe.execute()

    def isCached(self, key):
        """
        Returns whether or not the inputted key is cached.

        :param key | <hashable>

        :return <bool>
        """
        return self._client.exists(self.__key(key))

    def setValue(self, key, value, timeout=None):
        """
        Caches the inputted key and value to this instance.

        :param key     | <hashable>
               value   | <variant>
               timeout | <int> || None | seconds, defaults to self.timeout()
        """
        key = self.__key(key)
        self._client.set(key, cPickle.dumps(value))
        self._client.expire(key, int(timeout or self.timeout()))

    def value(self, key, default=None):
        """
        Returns the value for the cached key for this instance.

        :param key     | <hashable>
               default | <variant>

        :return <variant>
        """
        key = self.__key(key)
        value = self._client.get(key)
        if value is not None:
            # NOTE(review): cPickle.loads can execute arbitrary code -- only
            # safe while the redis server is fully trusted
            return cPickle.loads(value)
        else:
            return default
# create a basic cache object
DataCache.registerAddon('Redis', RedisCache)
"""
Defines caching methods to use when working with tables.
"""
import datetime
import projex
from projex.decorators import abstractmethod
from projex.addon import AddonManager
from projex.lazymodule import lazy_import
orb = lazy_import('orb')
class DataCache(AddonManager):
    """ Base caching object for tracking data caches """

    def __init__(self, timeout=0):
        # a timeout of 0 falls back to the system maximum (see timeout())
        self._timeout = timeout
        self._enabled = True

    def __getitem__(self, key):
        # dict-style access; raises KeyError for missing keys
        if self.isCached(key):
            return self.value(key)
        else:
            raise KeyError(key)

    def __setitem__(self, key, value):
        self.setValue(key, value)

    # noinspection PyUnusedLocal
    @abstractmethod()
    def cachedAt(self, key):
        """
        Returns when the inputted key was last cached for this instance.

        :param key | <hashable>

        :return <datetime>
        """
        return datetime.datetime.min

    def clear(self):
        """
        Clears out all the caching information for this instance.
        """
        return self.expire()

    @abstractmethod()
    def expire(self, key=None):
        """
        Removes the given key or clears the cache for this data cache.

        :param key | <hashable> || None
        """
        return False

    @abstractmethod()
    def isCached(self, key):
        """
        Returns whether or not the inputted key is cached.

        :param key | <hashable>

        :return <bool>
        """
        return False

    def isEnabled(self):
        """
        Returns whether or not this cache instance is enabled.  You can
        disable local cache instances by calling setEnabled, or global
        caching from the manager instance.

        :return <bool>
        """
        return self._enabled and orb.system.isCachingEnabled()

    def setEnabled(self, state):
        """
        Sets whether or not the caching for this instance is enabled.

        :param state | <bool>
        """
        self._enabled = state

    def setTimeout(self, timeout):
        """
        Sets the number of seconds that this cache will store its information
        before it expires.

        NOTE(review): this docstring previously said milliseconds, but the
        backends interpret the value as seconds (BasicCache.setValue builds a
        timedelta(seconds=timeout) and timeout() documents seconds).

        :param timeout | <int> | seconds
        """
        self._timeout = timeout

    @abstractmethod()
    def setValue(self, key, value, timeout=0):
        """
        Caches the inputted key and value to this instance.

        :param key   | <hashable>
               value | <variant>
        """
        pass

    def timeout(self):
        """
        Returns the number of seconds that this cache will store its
        information before it timeout.

        :return <int> | seconds
        """
        return self._timeout or orb.system.maxCacheTimeout()

    @abstractmethod()
    def value(self, key, default=None):
        """
        Returns the value for the cached key for this instance.

        :return <variant>
        """
        return default

    @staticmethod
    def create(timeout=0):
        """
        Creates a new data cache instance through the orb system manager.  To setup a factory, use
        `orb.system.setCacheFactory(factory)`

        :return <orb.caching.DataCache>
        """
        factory = orb.system.cacheFactory()
        if factory:
            # an already-instantiated cache is used directly (shared instance)
            if isinstance(factory, orb.DataCache):
                return factory
            # a callable factory builds a new cache per request
            elif callable(factory):
                return factory(timeout)
            # otherwise treat the factory as a registered addon name
            else:
                cls = DataCache.byName(factory)
                return cls(timeout)
        else:
            cls = DataCache.byName('Basic')
            return cls(timeout)
from orb.caching.backends import __plugins__
projex.importmodules(__plugins__)
"""
Defines caching methods to use when working with tables.
"""
import logging
from projex.lazymodule import lazy_import
log = logging.getLogger(__name__)
orb = lazy_import('orb')
class OrderCompare(object):
    """ Defines a class for comparing records by a database order scheme """
    def __init__(self, order):
        # list of (columnName, direction) pairs, e.g. [('name', 'asc')]
        self._order = order

    def __call__(self, a, b):
        """
        Compares the inputted values for each column based on a given
        direction.

        :param a | <dict>
               b | <dict>

        :return <int> -1 || 0 || 1
        """
        for col, direction in self._order:
            a_val = a.get(col)
            b_val = b.get(col)

            # equivalent to the Python 2 cmp() builtin (which was removed in
            # Python 3), so this class is forward-compatible
            result = (a_val > b_val) - (a_val < b_val)

            # ignore same results
            if not result:
                continue

            # return the direction result
            if direction == 'desc':
                return -result
            else:
                return result
        return 0
# ----------------------------------------------------------------------
class RecordCache(object):
    """
    Defines a key cache class for the table to use when caching records from the
    system.  Caching is defined on the TableSchema class, see
    TableSchema.setCachingEnabled for more info.

    :usage      |from orb import RecordCache
                |
                |with RecordCache(User, AccountType):
                |   for transaction in Transactions.select():
                |       print transaction.transferredBy()  # lookups up from
                |                                          # the user cache
                |from orb import Table
                |
                |class User(Table):
                |   __db_cached__ = True
                |
                |User.select() # looks up from the RecordCache for the table
    """
    def __init__(self, *tables, **kwds):
        # default to caching every registered model when no tables are given
        if not tables:
            tables = orb.system.models()

        timeout = kwds.get('timeout', None)
        self._caches = dict([(table, orb.TableCache(table, table.schema().cache(), timeout=timeout))
                             for table in tables])

    def __enter__(self):
        self.begin()
        # bug fix: return the instance so `with RecordCache(...) as cache:`
        # binds the cache instead of None (backward-compatible for callers
        # that ignore the context value)
        return self

    # noinspection PyUnusedLocal
    def __exit__(self, *args):
        self.end()

    def begin(self):
        """
        Begins the caching process for this instance.
        """
        for table in self._caches:
            table.pushRecordCache(self)

    # noinspection PyUnusedLocal
    def cache(self, table, autocreate=False):
        """
        Returns the cache associated with this record cache for the given
        table.

        :return <orb.TableCache> || None
        """
        return self._caches.get(table)

    def clear(self, table=None):
        """
        Clears the current cache information.

        :param table | subclass of <orb.Table> || None | clears all when None
        """
        if table:
            cache = self._caches.get(table)
            if cache:
                cache.clear()
        else:
            for cache in self._caches.values():
                cache.clear()

    def count(self, backend, table, lookup, options):
        """
        Returns the number of entries based on the given options.

        :param backend | <orb.Connection>
               table   | subclass of <orb.Table>
               lookup  | <orb.LookupOptions>
               options | <orb.ContextOptions>

        :return <int>
        """
        # no need to inflate records just to count them
        options.inflated = False
        return len(self.select(backend, table, lookup, options))

    def distinct(self, backend, table, lookup, options):
        """
        Returns a distinct set of entries based on the given lookup options.

        :param table   | subclass of <orb.Table>
               lookup  | <orb.LookupOptions>
               options | <orb.ContextOptions>

        :return {<str> columnName: <list> value, ..}
        """
        columns = list(lookup.columns)
        output = dict([(column, set()) for column in columns])
        for record in self.select(backend, table, lookup, options):
            for column in columns:
                output[column].add(record.get(column))

        # convert the collected sets to lists for the return value
        for key, value in output.items():
            output[key] = list(value)

        return output

    def end(self):
        """
        Ends the caching process for this instance.
        """
        for table in self._caches:
            table.popRecordCache()

    def preloadedRecords(self, table, lookup):
        """
        Looking up pre-loaded record information.

        :param table  | subclass of <orb.Table>
               lookup | <orb.LookupOptions>

        :return [<dict> data, ..]
        """
        cache = self.cache(table)
        if not cache:
            return []

        records = cache.value('preloaded_records', [])
        if lookup.order:
            schema = table.schema()
            records = sorted(records, OrderCompare([(schema.column(x).fieldName(), y) for x, y in lookup.order]))

        start = lookup.start or 0
        offset = 0
        output = []
        schema = table.schema()
        columns = schema.columns() if not lookup.columns else [schema.column(col) for col in lookup.columns]
        for r, record in enumerate(records):
            # ensure we're looking up a valid record
            if lookup.where and not lookup.where.validate(record, table):
                continue

            # bug fix: skip the first `start` records -- the comparison was
            # previously inverted (start < offset), so lookup.start was
            # never honored
            if offset < start:
                offset += 1
                continue

            # restrict to the requested columns and normalize the ordering
            # so distinct comparisons are stable
            record = [item for item in record.items() if schema.column(item[0]) in columns]
            record.sort()

            # bug fix: compare dict against the dicts stored in output --
            # the list-vs-dict comparison used before could never match
            data = dict(record)
            if lookup.distinct and data in output:
                continue

            output.append(data)
            if lookup.limit and len(output) == lookup.limit:
                break

        return output

    def record(self, table, primaryKey):
        """
        Returns a record for the given primary key.

        :return {<str> columnName: <variant> value, ..} record
        """
        lookup = orb.LookupOptions()
        lookup.where = orb.Query(table) == primaryKey

        options = orb.ContextOptions()

        return self.selectFirst(table.getDatabase().backend(),
                                table,
                                lookup,
                                options)

    def records(self, table, **options):
        """
        Returns a list of records that are cached.

        :param table | subclass of <orb.Table>
               query | <orb.Query> || None

        :return [<Table>, ..]
        """
        # allow the legacy 'query' keyword as an alias for 'where'
        if 'query' in options:
            options['where'] = options.pop('query')

        lookup = orb.LookupOptions(**options)
        ctxt_opts = orb.ContextOptions(**options)

        return self.select(table.getDatabase().backend(),
                           table,
                           lookup,
                           ctxt_opts)

    def setTimeout(self, table, seconds):
        """
        Sets the length of time in seconds to hold onto this cache before
        re-querying the database.

        :param table   | subclass of <orb.Table>
               seconds | <int> || <float>
        """
        cache = self.cache(table)
        if cache:
            timeout = seconds
            table_timeout = table.schema().cacheTimeout()
            max_timeout = orb.system.maxCacheTimeout()
            opts = [timeout, table_timeout, max_timeout]

            # bug fix: guard against a ValueError when no positive timeout
            # is available
            valid = filter(lambda x: x > 0, opts)
            if valid:
                # set the timeout in seconds
                cache.setTimeout(min(valid))

    def selectFirst(self, backend, table, lookup, options):
        """
        Returns the first record from the cache that matches the inputted
        parameters.

        :param table   | subclass of <orb.Table>
               lookup  | <orb.LookupOptions>
               options | <orb.ContextOptions>

        :return <dict> record || None
        """
        lookup.limit = 1
        results = self.select(backend, table, lookup, options)
        if results:
            return results[0]
        return None

    def select(self, backend, table, lookup, options):
        """
        Returns a list of records from the cache that matches the inputted
        parameters.

        :param backend | <orb.Connection>
               table   | subclass of <orb.Table>
               lookup  | <orb.LookupOptions>
               options | <orb.ContextOptions>

        :return [<dict> record, ..]
        """
        cache = self.cache(table)
        cache_key = (hash(lookup), hash(options), backend.database().name() if backend.database() else '')
        preload_key = 'preloaded_records'

        # determine if the query is simple (only involving a simple table)
        # if it is, we can use local querying on the cached set for
        # preloaded records.  If it is not (joined tables) we need to
        # actually query the database and cache the results for this query
        if lookup.where is not None:
            is_simple = len(lookup.where.tables(table)) <= 1
        else:
            is_simple = not (bool(lookup.expand) or options.locale != orb.system.locale())

        # return an exact cached match
        if cache and cache.isCached(cache_key):
            return cache.value(cache_key)

        # return a parsed match from preloaded records
        elif is_simple and cache and cache.isCached(preload_key):
            records = self.preloadedRecords(table, lookup)
            cache.setValue(cache_key, records)
            return records

        # otherwise, determine if we need to load this exact search
        # or reload the pre-loaded records
        elif is_simple and cache and cache.isPreloaded():
            all_lookup = orb.LookupOptions()
            all_opts = orb.ContextOptions()
            records = backend.select(table, all_lookup, all_opts)
            cache.setValue(preload_key, records)

            records = self.preloadedRecords(table, lookup)
            cache.setValue(cache_key, records)
            return records

        # otherwise, search the backend for this lookup specifically and
        # cache the results
        else:
            records = backend.select(table, lookup, options)
            if cache:
                cache.setValue(cache_key, records)
            return records

    def tables(self):
        """
        Returns the tables that are associated with this cache instance.

        :return [subclass of <orb.Table>, ..]
        """
        return self._caches.keys()
"""
Defines caching methods to use when working with tables.
"""
from projex.lazymodule import lazy_import
orb = lazy_import('orb')
class TableCache(object):
    """ Base class for referencing orb Table cache information """

    def __init__(self, table, cache, timeout=None):
        """
        Wraps the given data cache with table-specific key namespacing.

        :param table   | <orb.Table>
               cache   | <orb.caching.DataCache>
               timeout | <int> || None
        """
        self._cache = cache
        self._table = table
        self._timeout = timeout
        self._preloaded = table.schema().preloadCache()

    def __key(self, key):
        """
        Create the key for this table cache by joining the table name with the inputted key.

        :param key | <str>
        """
        table_name = self.table().schema().name()
        return '{0}({1})'.format(table_name, key)

    def __getitem__(self, key):
        return self._cache[self.__key(key)]

    def __setitem__(self, key, value):
        self._cache[self.__key(key)] = value

    def cache(self):
        """
        Returns the cache object associated with this table.

        :return <orb.caching.DataCache> || None
        """
        return self._cache

    def cachedAt(self, key):
        """
        Returns the time when the given key was cached.

        :param key | <str>
        """
        return self._cache.cachedAt(self.__key(key))

    def expire(self, key=None):
        """
        Expires the given key, or the whole cache if no key is provided.

        :param key | <hashable> || None
        """
        namespaced = self.__key(key) if key else None
        return self._cache.expire(namespaced)

    def isCached(self, key):
        """
        Returns whether or not the inputted key is cached.

        :param key | <hashable> || None
        """
        return self._cache.isCached(self.__key(key))

    def isEnabled(self):
        """
        Returns whether or not this table's cache is enabled.

        :return <bool>
        """
        return self._cache.isEnabled()

    def isPreloaded(self):
        """
        Returns whether or not the cache is preloaded.

        :return <bool>
        """
        return self._preloaded

    def setEnabled(self, state):
        """
        Sets whether or not to use the cache for this table.

        :param state | <bool>
        """
        self._cache.setEnabled(state)

    def setPreloaded(self, state):
        """
        Sets whether or not the cache should be preloaded.

        :param state | <bool>
        """
        self._preloaded = state

    def setTable(self, table):
        """
        Assign the table for this cache.

        :param table | <orb.Table> || None
        """
        self._table = table

    def setTimeout(self, seconds):
        """
        Sets the timeout length for this table cache to the inputted amount.

        :param seconds | <int> || None
        """
        self._timeout = seconds

    def setValue(self, key, value, timeout=None):
        """
        Stores the given key as the given value.

        :param key   | <hashable>
               value | <variant>
        """
        effective = timeout or self.timeout()
        self._cache.setValue(self.__key(key), value, timeout=effective)

    def table(self):
        """
        Returns the table associated with this cache instance.

        :return <orb.Table>
        """
        return self._table

    def timeout(self):
        """
        Returns the timeout for this cache.

        :return <int> | seconds
        """
        return self._timeout

    def value(self, key, default=None):
        """
        Returns the value for this cache's key.

        :param key     | <hashable>
               default | <variant>
        """
        return self._cache.value(self.__key(key), default)
""" Defines common properties and types used by the different modules. """

# ------------------------------------------------------------------------------

from projex.enum import enum

# C
# -----------------------------------------------------------------------------

# enumerates every column type the ORM can map to a database type
# NOTE(review): the numeric values are assigned by projex.enum -- confirm its
# semantics before relying on specific values or bitwise combinations
ColumnType = enum(
    # simple types
    'Bool',
    'Decimal',
    'Double',
    'Integer',  # 32-bit integer
    'BigInt',  # 64-bit integer
    'Enum',  # equates to an integer in databases, but links to an enum

    # string types
    'String',  # used for limited char sets
    'Text',  # used for larger string data
    'Url',  # similar to String, but uses url regex validation
    'Email',  # similar to String, but uses email regex validation
    'Password',  # similar to String, but uses password regex validation
    'Filepath',  # similar to String, but uses filepath regex validation
    'Directory',  # similar to String, but uses directory regex validation
    'Xml',  # similar to Text, but auto-escape data stored
    'Html',  # similar to Text, but store rich text data & auto-escape
    'Color',  # similar to String, stores the HEX value of a color (#ff00)

    # date/time types
    'Datetime',
    'Date',
    'Interval',
    'Time',
    'DatetimeWithTimezone',
    'Timestamp',
    'Timestamp_UTC',

    # data types
    'Image',  # stores images in the database as binary
    'ByteArray',  # stores additional binary information
    'Dict',  # stores python dictionary types
    'Pickle',  # stores python pickle data
    'Yaml',  # stores python data as yaml (requires PyYaml)
    'JSON',  # stores python data as JSON
    'Query',  # stores an orb.Query class as xml

    # relation types
    'ForeignKey',  # tells the system to use the relation's information
)

# D
# -----------------------------------------------------------------------------

DatabaseFlags = enum('SafeInsert')
DeleteFlags = enum('Cascaded', 'Blocked')

# R
# -----------------------------------------------------------------------------

RemovedAction = enum('DoNothing', 'Cascade', 'Block')

# S
#------------------------------------------------------------------------------

SearchMode = enum('Any', 'All')
SelectionMode = enum('Normal', 'Count', 'Distinct')
import orb
class SingleTransaction(object):
    """
    This context will automatically close connections to a given set of databases.
    If no databases are provided, then all databases in the system will be used.
    """
    def __init__(self, *databases):
        # fall back to every database registered with the orb system
        self._databases = databases or orb.system.databases()

    def __enter__(self):
        # bug fix: return the context object so
        # `with SingleTransaction(...) as txn:` binds the instance
        # instead of None (backward-compatible for callers without `as`)
        return self

    def __exit__(self, exc_type, error, traceback):
        # always disconnect, even when the managed block raised
        for db in self._databases:
            db.disconnect()
"""
Creates the database backend systems to manage the schema
data for different types of databases.
"""
__recurse__ = False
__toc__ = [r'orb.core.backends.nonsql',
r'orb.core.backends.sql',
r'orb.core.backends.web']
""" Defines all the non-SQL based connection classes. """
""" Defines all the SQL based connection classes. """
# import the backend SQL implementations
from .mysql import *
from .postgresql import *
"""
Defines the base abstract SQL connection for all SQL based
connection backends. """
import datetime
import logging
import orb
import projex.iters
import projex.text
import threading
import time
from collections import defaultdict
from orb import errors
from projex.decorators import abstractmethod
from projex.contexts import MultiContext
from projex.locks import ReadWriteLock, ReadLocker, WriteLocker
from projex.text import nativestring as nstr
log = logging.getLogger(__name__)
from .abstractsql import SQL
# noinspection PyAbstractClass,PyProtectedMember
class SQLConnection(orb.Connection):
"""
Creates a SQL based backend connection type for handling database
connections to different SQL based databases. This class can be subclassed
to define different SQL connections.f
"""
def __init__(self, database):
    """
    Initializes the SQL connection wrapper for the given database.

    :param database | <orb.Database>
    """
    super(SQLConnection, self).__init__(database)

    # define custom properties
    self.__insertBatchSize = 500  # max records per generated INSERT statement
    self.__threads = {}  # maps thread ident -> native backend connection
    self.__concurrencyLocks = defaultdict(ReadWriteLock)  # per-schema write locks

    # set standard properties
    self.setThreadEnabled(True)
# ----------------------------------------------------------------------
# PROTECTED METHODS
#----------------------------------------------------------------------
@abstractmethod()
def _execute(self,
             command,
             data=None,
             autoCommit=True,
             autoClose=True,
             returning=True,
             mapper=dict):
    """
    Executes the inputted command into the current
    connection cursor.  Concrete SQL backends implement this method;
    the base implementation returns no rows.

    :param command    | <str>
           data       | <dict> || None
           autoCommit | <bool> | commit database changes immediately
           autoClose  | <bool> | closes connections immediately
           returning  | <bool>
           mapper     | <variant>

    :return [{<str> key: <variant>, ..}, ..], <int> rowcount
    """
    return [], -1
@abstractmethod()
def _open(self, db):
    """
    Handles simple, SQL specific connection creation.  This will not
    have to manage thread information as it is already managed within
    the main open method for the SQL class.  Concrete backends override
    this; the base implementation opens nothing.

    :param db | <orb.Database>

    :return <variant> | backend specific database connection
    """
    return None
@abstractmethod()
def _interrupt(self, threadId, connection):
    """
    Interrupts the given backend database connection from a separate thread.
    Concrete backends override this; the base implementation is a no-op.

    :param threadId   | <int>
           connection | <variant> | backend specific database.
    """
    pass
#----------------------------------------------------------------------
# PUBLIC METHODS
#----------------------------------------------------------------------
def close(self):
    """
    Closes the connection to the database for this connection.

    :return <bool> closed
    """
    current = threading.current_thread().ident
    for tid, conn in self.__threads.items():
        # this thread's connection can be closed directly; connections
        # owned by other threads have to be interrupted remotely
        if tid == current:
            conn.close()
        else:
            self._interrupt(tid, conn)

    self.__threads.clear()
    return True
def count(self, table_or_join, lookup, options):
    """
    Returns the count of records that will be loaded for the inputted
    information.

    :param table_or_join | <subclass of orb.Table> || None
           lookup        | <orb.LookupOptions>
           options       | <orb.ContextOptions>

    :return <int>
    """
    # tables/views use the plain count statement; anything else is
    # treated as a join and needs the join-aware variant
    if orb.Table.typecheck(table_or_join) or orb.View.typecheck(table_or_join):
        SELECT_COUNT = self.sql('SELECT_COUNT')
    else:
        SELECT_COUNT = self.sql('SELECT_COUNT_JOIN')

    data = {}
    try:
        cmd = SELECT_COUNT(table_or_join,
                           lookup=lookup,
                           options=options,
                           IO=data)
    except errors.QueryIsNull:
        # a query that can never match yields a count of zero
        return 0

    if options.dryRun:
        # dry runs only display the generated SQL, nothing is executed
        print cmd % data
        return 0
    else:
        rows, _ = self.execute(cmd, data)
        return sum([row['count'] for row in rows])
def commit(self):
    """
    Commits the changes to the current database connection.

    :return <bool> success
    """
    if not (self.isConnected() and self.commitEnabled()):
        return False

    # defer the commit to an active transaction when one is in progress,
    # otherwise commit on the native connection directly
    if orb.Transaction.current():
        orb.Transaction.current().setDirty(self)
    else:
        self.nativeConnection().commit()
    return True
def createTable(self, schema, options):
    """
    Creates a new table in the database based off the inputted
    schema information.  If the dryRun flag is specified, then
    the SQLConnection will only be logged to the current logger, and not
    actually executed in the database.

    :param schema  | <orb.TableSchema>
           options | <orb.ContextOptions>

    :return <bool> success
    """
    # don't create abstract schemas
    if schema.isAbstract():
        name = schema.name()
        log.debug('{0} is an abstract table, not creating'.format(name))
        return False

    CREATE_TABLE = self.sql('CREATE_TABLE')
    data = {}
    cmd = CREATE_TABLE(schema.model(), options=options, IO=data)
    if not options.dryRun:
        self.execute(cmd, data)
        log.info('Created {0} table.'.format(schema.dbname()))
    else:
        # dry runs only display the generated SQL, nothing is executed
        print cmd % data
    return True
def createView(self, schema, options):
"""
Creates a new table in the database based cff the inputted
schema information. If the dryRun flag is specified, then
the SQLConnection will only be logged to the current logger, and not
actually executed in the database.
:param schema | <orb.TableSchema>
info | <dict>
options | <orb.ContextOptions>
:return <bool> success
"""
CREATE_VIEW = self.sql('CREATE_VIEW')
data = {}
cmd = CREATE_VIEW(schema.model(), options=options, IO=data)
if not options.dryRun:
self.execute(cmd, data)
log.info('Created {0} table.'.format(schema.dbname()))
else:
print cmd % data
return True
def disableInternals(self):
    """
    Disables the internal checks and update system.  This method should
    be used at your own risk, as it will ignore errors and internal checks
    like auto-incrementation.  This should be used in conjunction with
    the enableInternals method, usually these are used when doing a
    bulk import of data.

    :sa enableInternals
    """
    super(SQLConnection, self).disableInternals()

    # the ENABLE_INTERNALS statement takes a flag; passing False turns
    # the database-side internal checks off
    ENABLE_INTERNALS = self.sql('ENABLE_INTERNALS')
    data = {}
    sql = ENABLE_INTERNALS(False, IO=data)

    self.execute(sql, data)
def distinct(self, table_or_join, lookup, options):
    """
    Returns a distinct set of results for the given information.

    :param table_or_join | <subclass of orb.Table> || <orb.Join>
           lookup        | <orb.LookupOptions>
           options       | <orb.ContextOptions>

    :return {<str> columnName: <list> value, ..}
    """
    # force a distinct selection, then collect the unique values per column
    lookup.distinct = True
    output = defaultdict(set)
    for record in self.select(table_or_join, lookup, options):
        for column, value in record.items():
            output[column].add(value)

    return output
def enableInternals(self):
    """
    Enables the internal checks and update system.  This method should
    be used at your own risk, as it will ignore errors and internal checks
    like auto-incrementation.  This should be used in conjunction with
    the disableInternals method, usually these are used when doing a
    bulk import of data.

    :sa disableInternals
    """
    # the ENABLE_INTERNALS statement takes a flag; passing True turns
    # the database-side internal checks back on
    ENABLE_INTERNALS = self.sql('ENABLE_INTERNALS')
    data = {}
    sql = ENABLE_INTERNALS(True, IO=data)

    self.execute(sql, data)

    super(SQLConnection, self).enableInternals()
def existingColumns(self, schema, options):
    """
    Looks up the existing columns from the database based on the
    inputted schema and namespace information.

    :param schema  | <orb.TableSchema>
           options | <orb.ContextOptions>

    :return [<str>, ..]
    """
    TABLE_COLUMNS = self.sql('TABLE_COLUMNS')
    data = {}
    sql = TABLE_COLUMNS(schema, options=options, IO=data)

    # execute() returns (rows, rowcount); only the rows matter here
    rows, _ = self.execute(sql, data)
    return [row['column_name'] for row in rows]
def execute(self,
            command,
            data=None,
            autoCommit=True,
            autoClose=True,
            returning=True,
            mapper=dict,
            retries=3):
    """
    Executes the inputted command into the current
    connection cursor.

    :param command    | <str>
           data       | <dict> || None
           autoCommit | <bool> | commit database changes immediately
           autoClose  | <bool> | closes connections immediately
           returning  | <bool>
           mapper     | <variant>
           retries    | <int> | reconnect attempts after a lost connection

    :return [{<str> key: <variant>, ..}, ..], <int> rowcount
    """
    # make sure we don't have an undefined query
    if data and orb.Query.UNDEFINED in data.values():
        return [], 0

    rowcount = 0
    if data is None:
        data = {}

    command = command.strip()
    if not command:
        return [], 0

    if not self.open():
        raise errors.ConnectionFailed('Failed to open connection.',
                                      self.database())

    # when in debug mode, simply log the command to the logger
    elif self.database().commandsBlocked():
        log.info(command)
        return [], rowcount

    results = []
    start = datetime.datetime.now()
    for i in xrange(retries):
        # restart timing on every attempt so failure logs report the
        # duration of the failed attempt, not the whole loop
        start = datetime.datetime.now()

        try:
            results, rowcount = self._execute(command,
                                              data,
                                              autoCommit,
                                              autoClose,
                                              returning,
                                              mapper)
            break

        # always raise interruption errors as these need to be handled
        # from a thread properly
        except errors.Interruption:
            delta = datetime.datetime.now() - start
            log.critical('Query took: %s' % delta)
            raise

        # attempt to reconnect as long as we have enough retries left
        # otherwise raise the error
        except errors.ConnectionLost:
            delta = datetime.datetime.now() - start
            log.error('Query took: %s' % delta)

            if i != (retries - 1):
                time.sleep(0.25)
                self.reconnect()
            else:
                raise

        # handle any known a database errors with feedback information
        except errors.DatabaseError as err:
            delta = datetime.datetime.now() - start
            log.error('Query took: %s' % delta)
            log.error(u'{0}: \n {1}'.format(err, command))
            raise

        # always raise any unknown issues for the developer
        except StandardError as err:
            delta = datetime.datetime.now() - start
            log.error('Query took: %s' % delta)
            log.error(u'{0}: \n {1}'.format(err, command))
            raise

    # escalate the log level with how long the query took
    # NOTE(review): `delta * 1000 < 3000` compares milliseconds against a
    # 3-second threshold -- confirm the intended thresholds
    delta = (datetime.datetime.now() - start).total_seconds()
    if delta * 1000 < 3000:
        log.debug('Query took: %s' % delta)
        log.debug('{0}\n\ndata:{1}'.format(command, data))
    elif delta * 1000 < 6000:
        log.warning('Query took: %s' % delta)
        log.warning(command)
        log.warning('{0}\n\ndata:{1}'.format(command, data))
    else:
        log.error('Query took: %s' % delta)
        log.error(command)
        log.error('{0}\n\ndata:{1}'.format(command, data))

    return results, rowcount
def insert(self, records, lookup, options):
    """
    Inserts the table instance into the database.  If the
    dryRun flag is specified, then the command will be
    logged but not executed.

    :param records | <orb.Table>
           lookup  | <orb.LookupOptions>
           options | <orb.ContextOptions>

    :return <dict> changes
    """
    # convert the recordset to a list
    if orb.RecordSet.typecheck(records):
        records = list(records)

    # wrap the record in a list
    elif orb.Table.recordcheck(records) or orb.View.recordcheck(records):
        records = [records]

    # determine the proper records for insertion
    inserter = defaultdict(list)
    changes = []
    for record in records:
        # make sure we have some data to insert
        rchanges = record.changeset(columns=lookup.columns)
        changes.append(rchanges)

        # do not insert records that already exist
        if options.force:
            pass
        elif record.isRecord() or not rchanges:
            continue

        inserter[record.schema()].append(record)

    cmds = []
    data = {}

    autoinc = options.autoIncrement
    INSERT = self.sql('INSERT')
    INSERTED_KEYS = self.sql('INSERTED_KEYS')

    locks = []
    for schema, schema_records in inserter.items():
        if not schema_records:
            continue

        # scale the batch size down as the column count grows so each
        # generated statement stays a manageable size
        colcount = len(schema.columns())
        batchsize = self.insertBatchSize()
        size = batchsize / max(int(round(colcount / 10.0)), 1)

        for batch in projex.iters.batch(schema_records, size):
            batch = list(batch)
            icmd = INSERT(schema,
                          batch,
                          columns=lookup.columns,
                          autoincrement=autoinc,
                          options=options,
                          IO=data)
            if icmd:
                cmds.append(icmd)

        if cmds:
            # guard concurrent writes to the same schema
            locks.append(WriteLocker(self.__concurrencyLocks[schema.name()], delay=0.1))

        # for inherited schemas in non-OO tables, we'll define the
        # primary keys before insertion
        if autoinc and INSERTED_KEYS:
            cmd = INSERTED_KEYS(schema, count=len(schema_records), IO=data)
            cmds.append(cmd)

    if not cmds:
        return {}

    cmd = u'\n'.join(cmds)

    if options.dryRun:
        # dry runs only display the generated SQL, nothing is executed
        print cmd % data

        if len(changes) == 1:
            return {}
        else:
            return []
    else:
        with MultiContext(*locks):
            results, _ = self.execute(cmd, data, autoCommit=False)

    if not self.commit():
        if len(changes) == 1:
            return {}
        return []

    # update the values for the database
    for i, record in enumerate(records):
        try:
            record.updateOptions(**options.assigned())
            record._updateFromDatabase(results[i])
        except IndexError:
            pass

        record._markAsLoaded(self.database(), columns=lookup.columns)

    if len(changes) == 1:
        return changes[0]
    return changes
def insertBatchSize(self):
    """
    Returns the maximum number of records that will be grouped together
    into a single INSERT statement.

    :return <int>
    """
    return self.__insertBatchSize
def interrupt(self, threadId=None):
    """
    Interrupts the access to the database for the given thread.

    When `threadId` is None, every connection owned by a thread other than
    the calling one is interrupted and discarded.  Otherwise only the
    connection for `threadId` is closed (directly when it belongs to the
    calling thread, out-of-band via `_interrupt` otherwise).

    :param threadId | <int> || None
    """
    cid = threading.current_thread().ident
    if threadId is None:
        # iterate over a snapshot so the pop below cannot corrupt the
        # iteration (the original popped while iterating .items())
        for tid, conn in list(self.__threads.items()):
            if tid != cid:
                conn.interrupt()
                self.__threads.pop(tid)
    else:
        conn = self.__threads.get(threadId)
        if not conn:
            return

        if threadId == cid:
            # a thread may close its own connection directly
            conn.close()
        else:
            # another thread's connection must be interrupted externally
            self._interrupt(threadId, conn)

        self.__threads.pop(threadId)
def isConnected(self):
    """
    Returns whether or not this connection is currently
    active for the calling thread.

    :return <bool> connected
    """
    conn = self.nativeConnection()
    return conn is not None
def nativeConnection(self):
    """
    Returns the backend database connection associated with the calling
    thread, or None when this thread has not opened one yet.

    :return <variant> || None
    """
    current = threading.current_thread()
    return self.__threads.get(current.ident)
def open(self, force=False):
    """
    Opens a new database connection to the database defined
    by the inputted database.

    :param force | <bool> | reconnect even when a connection already exists

    :return <bool> success
    """
    tid = threading.current_thread().ident

    # clear out connections held by threads that have finished
    # (is_alive is the modern spelling; isAlive was removed in Python 3.9)
    for thread in threading.enumerate():
        if not thread.is_alive():
            self.__threads.pop(thread.ident, None)

    conn = self.__threads.get(tid)

    # check to see if we already have a connection going
    if conn and not force:
        return True

    # make sure we have a database assigned to this backend
    elif not self._database:
        raise errors.DatabaseNotFound()

    # open a new backend connection to the database for this thread
    conn = self._open(self._database)
    if conn:
        self.__threads[tid] = conn
        self._database.callbacks().emit(self._database.Signals.Connected, self)
    else:
        self._database.callbacks().emit(self._database.Signals.Disconnected, self)
    return conn is not None
def reconnect(self):
    """
    Forces a reconnection to the database for the current thread.
    """
    ident = threading.current_thread().ident
    stale = self.__threads.pop(ident, None)
    if stale:
        try:
            stale.close()
        except StandardError:
            pass
    return self.open()
def removeRecords(self, remove, options):
    """
    Removes the inputted record from the database.

    :param remove  | {<orb.Table>: [<orb.Query>, ..], ..}
           options | <orb.ContextOptions>

    :return <int> number of rows removed
    """
    if not remove:
        return 0

    # include various schema records to remove
    count = 0
    DELETE = self.sql('DELETE')
    for table, queries in remove.items():
        # lock writes to this table while its rows are deleted
        with WriteLocker(self.__concurrencyLocks[table.schema().name()], delay=0.1):
            for query in queries:
                data = {}
                sql = DELETE(table, query, options=options, IO=data)
                if options.dryRun:
                    # print(...) behaves identically on Python 2 and 3 for a
                    # single expression (old code used the Py2-only statement)
                    print(sql % data)
                else:
                    count += self.execute(sql, data)[1]
    return count
def rollback(self):
    """
    Rolls back changes to this database.

    :return <bool> | True when a rollback was actually issued
    """
    conn = self.nativeConnection()
    if not conn:
        return False
    conn.rollback()
    return True
def schemaInfo(self, options):
    """
    Collects information about the existing database structure by running
    the backend's SCHEMA_INFO statement.

    :param options | <orb.ContextOptions>

    :return {<str> table name: <dict> info, ..}
    """
    SCHEMA_INFO = self.sql('SCHEMA_INFO')
    data = {}
    sql = SCHEMA_INFO(options=options, IO=data)
    # execute() returns (results, rowcount); only the result rows are needed
    info = self.execute(sql, data)[0]
    # each row is expected to expose a 'name' key -- see the SCHEMA_INFO template
    return {table['name']: table for table in info}
def select(self, table_or_join, lookup, options):
    """
    Selects records from the database based on the inputted lookup and
    context options.

    :param table_or_join | subclass of <orb.Table> || <orb.View>
           lookup        | <orb.LookupOptions>
           options       | <orb.ContextOptions>

    :return [{<str> column: <variant> value, ..}, ..]
    """
    if orb.Table.typecheck(table_or_join) or orb.View.typecheck(table_or_join):
        # ensure the primary record information is provided for inflated
        # selections (fixes a comment that was corrupted by a bad paste)
        if lookup.columns and options.inflated:
            lookup.columns += [col.name() for col in
                               table_or_join.schema().primaryColumns()]

        SELECT = self.sql().byName('SELECT')

        schema = table_or_join.schema()
        data = {}
        sql = SELECT(table_or_join,
                     lookup=lookup,
                     options=options,
                     IO=data)

        # if we don't have any command to run, just return a blank list
        if not sql:
            return []
        elif options.dryRun:
            print(sql % data)
            return []
        else:
            with ReadLocker(self.__concurrencyLocks[schema.name()]):
                records = self.execute(sql, data)[0]

            # restore the raw database values through the datastore
            store = self.sql().datastore()
            for record in records:
                for name, value in record.items():
                    column = schema.column(name)
                    record[name] = store.restore(column, value)
            return records
    else:
        raise orb.errors.DatabaseError('JOIN NOT DEFINED')
def setupDatabase(self, options):
    """
    Initializes the database by defining any additional structures that are required during selection.

    :param options | <orb.ContextOptions>
    """
    SETUP_DB = self.sql('SETUP_DB')
    data = {}
    try:
        sql = SETUP_DB(IO=data)
    except StandardError as err:
        # a backend without a setup statement is a soft failure
        log.error(str(err))
    else:
        if options.dryRun:
            # single-expression print() is valid on Python 2 and 3
            print(sql % data)
        else:
            self.execute(sql, data)
def setInsertBatchSize(self, size):
    """
    Sets the maximum number of records that will be grouped together into
    a single INSERT statement.

    :param size | <int>
    """
    self.__insertBatchSize = size
def setRecords(self, schema, records, **options):
    """
    Restores the data for the inputted schema.

    :param schema  | <orb.TableSchema>
           records | [<dict> record, ..]
    """
    if not records:
        return

    engine = self.engine()

    # truncate the table
    cmd, dat = engine.truncateCommand(schema)
    self.execute(cmd, dat, autoCommit=False)

    # disable the tables keys
    cmd, dat = engine.disableInternalsCommand(schema)
    self.execute(cmd, dat, autoCommit=False)

    colcount = len(schema.columns())
    batchsize = self.insertBatchSize()
    # use floor division: the batch size must stay an integer (plain `/`
    # would produce a float under true division)
    size = batchsize // max(int(round(colcount / 10.0)), 1)

    # insert the records
    cmds = []
    dat = {}
    setup = {}
    for batch in projex.iters.batch(records, size):
        batch = list(batch)
        icmd, idata = engine.insertCommand(schema,
                                           batch,
                                           columns=options.get('columns'),
                                           autoincrement=False,
                                           setup=setup)
        cmds.append(icmd)
        dat.update(idata)

    self.execute(u'\n'.join(cmds), dat, autoCommit=False)

    # enable the table keys
    cmd, dat = engine.enableInternalsCommand(schema)
    self.execute(cmd, dat)
    self.commit()
def tableExists(self, schema, options):
    """
    Checks to see if the inputted table class exists in the
    database or not.

    :param schema  | <orb.TableSchema>
           options | <orb.ContextOptions>

    :return <bool> exists
    """
    statement = self.sql('TABLE_EXISTS')
    data = {}
    sql = statement(schema, options=options, IO=data)
    rows, _ = self.execute(sql, data, autoCommit=False)
    return bool(rows)
def update(self, records, lookup, options):
    """
    Updates the modified data in the database for the
    inputted record.  If the dryRun flag is specified then
    the command will be logged but not executed.

    :param record  | <orb.Table>
           lookup  | <orb.LookupOptions>
           options | <orb.ContextOptions>

    :return <dict> changes
    """
    # convert the recordset to a list
    if orb.RecordSet.typecheck(records):
        records = list(records)
    # wrap the record in a list
    elif orb.Table.recordcheck(records) or orb.View.recordcheck(records):
        records = [records]

    updater = defaultdict(list)
    changes = []
    for record in records:
        rchanges = record.changeset(columns=lookup.columns)
        changes.append(rchanges)

        if options.force:
            pass
        elif not record.isRecord():
            continue
        elif not rchanges:
            continue

        schemas = [record.schema()]
        for schema in schemas:
            updater[schema].append((record, rchanges))

    if not updater:
        if len(records) > 1:
            return []
        else:
            return {}

    cmds = []
    data = {}
    locks = []

    UPDATE = self.sql('UPDATE')

    # NOTE: the loop variable must NOT be named `changes` -- the original
    # code shadowed the per-record changeset list here, so the returns
    # below handed back (record, changeset) tuples instead of changesets
    for schema, schema_changes in updater.items():
        locks.append(WriteLocker(self.__concurrencyLocks[schema.name()], delay=0.1))
        icmd = UPDATE(schema, schema_changes, options=options, IO=data)
        cmds.append(icmd)

    cmd = u'\n'.join(cmds)

    if options.dryRun:
        print(cmd % data)
        if len(changes) == 1:
            return {}
        else:
            return []
    else:
        with MultiContext(*locks):
            results, _ = self.execute(cmd, data, autoCommit=False)
            if not self.commit():
                if len(changes) == 1:
                    return {}
                return []

        # update the values for the database
        for record in records:
            record._markAsLoaded(self.database(),
                                 columns=lookup.columns)
        if len(changes) == 1:
            return changes[0]
        return changes
def updateTable(self, schema, info, options):
    """
    Determines the difference between the inputted schema
    and the table in the database, creating new columns
    for the columns that exist in the schema and do not
    exist in the database.  If the dryRun flag is specified,
    then the SQLConnection won't actually be executed, just logged.

    :note       This method will NOT remove any columns, if a column
                is removed from the schema, it will simply no longer
                be considered part of the table when working with it.
                If the column was required by the db, then it will need to
                be manually removed by a database manager.  We do not
                wish to allow removing of columns to be a simple API
                call that can accidentally be run without someone knowing
                what they are doing and why.

    :param schema  | <orb.TableSchema>
           options | <orb.ContextOptions>

    :return <bool> success
    """
    # determine the new columns
    existing_columns = info['columns']
    all_columns = schema.fieldNames(recurse=False, kind=orb.Column.Kind.Field)
    missing_columns = set(all_columns).difference(existing_columns)

    # determine new indexes
    table_name = schema.dbname()
    existing_indexes = info['indexes'] or []
    all_indexes = [table_name + '_' + projex.text.underscore(index.name().lstrip('by')) + '_idx'
                   for index in schema.indexes(recurse=False)]
    all_indexes += [table_name + '_' + projex.text.underscore(column.indexName().lstrip('by')) + '_idx'
                    for column in schema.columns(recurse=False, kind=orb.Column.Kind.Field)
                    if column.indexed() and not column.primary()]
    missing_indexes = set(all_indexes).difference(existing_indexes)

    # if no columns are missing, return True to indicate the table is
    # up to date
    if not (missing_columns or missing_indexes):
        return True

    columns = [schema.column(col) for col in missing_columns]

    ALTER = self.sql('ALTER_TABLE')
    data = {}
    sql = ALTER(schema, added=columns, options=options, IO=data)

    if options.dryRun:
        # single-expression print() is valid on Python 2 and 3
        print(sql % data)
    else:
        self.execute(sql, data)

    opts = (schema.name(), ','.join(missing_columns), ','.join(missing_indexes))
    log.info('Updated {0} table, added {1} columns and {2} indexes.'.format(*opts))
    return True
@classmethod
def sql(cls, code=''):
    """
    Returns the statement interface for this connection.

    :return subclass of <orb.core.backends.sql.SQL>
    """
    return SQL.byName(code) if code else SQL
"""
Defines the base SQL class used for rendering SQL statements
out.
"""
import logging
import mako
import mako.template
import os
import orb
import sys
from projex.addon import AddonManager
log = logging.getLogger(__name__)
class SQL(AddonManager):
    """
    Addon-based wrapper around a mako template that renders a single SQL
    statement.  Concrete statements are registered by name (e.g. 'SELECT',
    'INSERT') through the AddonManager interface.
    """
    def __init__(self, sql, baseSQL=None):
        super(SQL, self).__init__()

        # define custom properties
        self._template = mako.template.Template(sql, strict_undefined=True)
        self._sql = sql
        self._baseSQL = baseSQL or SQL

    def __call__(self, *args, **options):
        """
        Executes this statement with the inputted keywords to generate
        the context SQL statement.

        :param **options | <keywords>

        :sa render

        :return <str> sql, <dict> data
        """
        # noinspection PyArgumentList
        return self.render(*args, **options)

    def baseSQL(self):
        """
        Returns the base SQL type to use for this instance.

        :return subclass of <SQL>
        """
        return self._baseSQL

    def render(self, **scope):
        """
        Executes this statement with the inputted keywords to generate
        the context SQL statement.  Any keywords provided to the render
        method will be used as scope variables within the mako template for
        this SQL class.

        :param **scope | <keywords>

        :return <str> sql, <dict> data
        """
        # define common properties
        scope.setdefault('orb', orb)
        scope.setdefault('SQL', self.baseSQL())
        scope.setdefault('QUOTE', scope['SQL'].byName('QUOTE'))
        scope.setdefault('IO', {})
        scope.setdefault('GLOBALS', {})

        text = self._template.render(**scope)
        return text.strip()

    def setSQL(self, sql):
        """
        Sets the SQL mako statement for this instance.  This will generate
        a new mako Template that will be used when executing this command
        during generation.

        :param sql | <str>
        """
        self._sql = sql
        # keep strict_undefined consistent with __init__ (the original
        # silently dropped it here, making re-assigned templates lenient)
        self._template = mako.template.Template(sql, strict_undefined=True)

    def sql(self):
        """
        Returns the template for this statement.

        :return <str>
        """
        return self._sql

    @classmethod
    def createDatastore(cls):
        """
        Creates a new datastore instance for this sql class.

        :return <orb.DataStore>
        """
        return orb.DataStore()

    @classmethod
    def datastore(cls):
        """
        Returns the base data store class for this SQL definition,
        lazily creating and caching one per subclass.

        :return subclass of <orb.DataStore>
        """
        # cache under a per-class (name mangled) attribute so subclasses
        # each get their own store
        key = '_{0}__datastore'.format(cls.__name__)
        try:
            return getattr(cls, key)
        except AttributeError:
            store = cls.createDatastore()
            setattr(cls, key, store)
            return store

    @classmethod
    def loadStatements(cls, module):
        """
        Loads the mako definitions for the inputted name.  This is the inputted
        module that will be attempting to access the file.  When running
        with mako file support, this will read and load the mako file, when
        built it will load a _mako.py module that defines the TEMPLATE variable
        as a string.

        :param name | <str>

        :return <str>
        """
        # load the shared statements
        from orb.core.backends.sql.shared import sql as shared_sql

        # load from the built table of contents
        if hasattr(module, '__toc__') and module.__toc__:
            for mako_mod in module.__toc__:
                try:
                    __import__(mako_mod)
                    templ = sys.modules[mako_mod].TEMPLATE
                except StandardError:
                    log.error('Failed to load mako file: {0}'.format(mako_mod))
                    continue
                else:
                    name = mako_mod.split('.')[-1].replace('_sql_mako', '').upper()
                    typ = getattr(shared_sql, name, SQL)
                    cls.registerAddon(name, typ(templ, cls))

        # load from the directory
        else:
            base = os.path.dirname(module.__file__)
            # reuse the computed base path (was recomputed in the original)
            for filename in os.listdir(base):
                if not filename.endswith('.mako'):
                    continue

                with open(os.path.join(base, filename), 'r') as f:
                    templ = f.read()

                name = filename.split('.')[0].upper()
                typ = getattr(shared_sql, name, SQL)
                cls.registerAddon(name, typ(templ, cls))
""" Defines the backend connection class for PostgreSQL databases. """
from .connection import MySQLConnection
from .sql import MySQL
from .store import MySQLDataStore
""" Defines the backend connection class for MySQL databases. """
import datetime
import logging
import os
import orb
import re
import traceback
from orb import errors
from projex.text import nativestring as nstr
from ..abstractconnection import SQLConnection
from ..abstractsql import SQL
log = logging.getLogger(__name__)
try:
import pymysql
except ImportError:
log.debug('For MySQL backend, download the PyMySQL module')
pymysql = None
# ----------------------------------------------------------------------
# noinspection PyAbstractClass
class MySQLConnection(SQLConnection):
    """
    Creates a MySQL backend connection type for handling database
    connections to MySQL databases.
    """

    # ----------------------------------------------------------------------
    #                       PROTECTED METHODS
    # ----------------------------------------------------------------------
    def _execute(self, command, data=None, autoCommit=True, autoClose=True,
                 returning=True, mapper=dict):
        """
        Executes the inputted command into the current
        connection cursor.

        :param command    | <str>
               data       | <dict> || None
               autoCommit | <bool> | commit database changes immediately
               autoClose  | <bool> | closes connections immediately

        :return [{<str> key: <variant>, ..}, ..], <int> count
        """
        rowcount = 0
        if data is None:
            data = {}

        # when in debug mode, simply log the command to the log
        # (fix: this check used to be an `elif` of the None test above, so
        # blocked commands still executed whenever no data was passed in)
        if self.database().commandsBlocked():
            log.info(command)
            return [], rowcount

        # create a new cursor for this transaction
        db = self.nativeConnection()
        if db is None:
            raise errors.ConnectionLost()

        with db.cursor() as cursor:
            start = datetime.datetime.now()
            try:
                cursor.execute(command, data)
                rowcount = cursor.rowcount

            # look for a disconnection error
            except pymysql.InterfaceError:
                raise errors.ConnectionLost()

            # look for integrity errors
            except (pymysql.IntegrityError, pymysql.OperationalError) as err:
                try:
                    db.rollback()
                except StandardError:
                    pass

                # look for a duplicate error
                # NOTE(review): these patterns match PostgreSQL error text
                # ("Key ... already exists"), not MySQL's "Duplicate entry"
                # messages -- confirm they ever fire for this backend
                duplicate_error = re.search('Key (.*) already exists.', nstr(err))
                if duplicate_error:
                    key = duplicate_error.group(1)
                    result = re.match('^\(lower\((?P<column>[^\)]+)::text\)\)=\((?P<value>[^\)]+)\)$', key)
                    if not result:
                        result = re.match('^(?P<column>\w+)=(?P<value>\w+)', key)

                    if result:
                        msg = '{value} is already being used.'.format(**result.groupdict())
                        raise errors.DuplicateEntryFound(msg)
                    else:
                        raise errors.DuplicateEntryFound(duplicate_error.group())

                # look for a reference error
                reference_error = re.search('Key .* is still referenced from table ".*"', nstr(err))
                if reference_error:
                    msg = 'Cannot remove this record, it is still being referenced.'
                    raise errors.CannotDelete(msg)

                # unknown error (fix: traceback.print_exc() returns None, so
                # the original always logged the string "None" here)
                log.debug(traceback.format_exc())
                raise errors.QueryFailed(command, data, nstr(err))

            # connection has closed underneath the hood
            except pymysql.Error as err:
                try:
                    db.rollback()
                except StandardError:
                    pass

                log.error(traceback.format_exc())
                raise errors.QueryFailed(command, data, nstr(err))

            try:
                results = [mapper(record) for record in cursor.fetchall()]
            except pymysql.ProgrammingError:
                # statement produced no result set
                results = []

            if autoCommit:
                self.commit()

            return results, rowcount

    def _open(self, db):
        """
        Handles simple, SQL specific connection creation.  This will not
        have to manage thread information as it is already managed within
        the main open method for the SQLBase class.

        :param db | <orb.Database>

        :return <variant> | backend specific database connection
        """
        if not pymysql:
            raise errors.BackendNotFound('PyMySQL is not installed.')

        dbname = db.databaseName()
        user = db.username()
        pword = db.password()
        host = db.host() or 'localhost'
        port = db.port() or 3306

        # create the python connection
        try:
            return pymysql.connect(db=dbname,
                                   user=user,
                                   passwd=pword,
                                   host=host,
                                   port=port,
                                   cursorclass=pymysql.cursors.DictCursor)
        except pymysql.OperationalError as err:
            log.error(err)
            raise errors.ConnectionFailed('Failed to connect to MySQL', db)

    def _interrupt(self, threadId, connection):
        """
        Interrupts the given native connection from a separate thread.

        :param threadId   | <int>
               connection | <variant> | backend specific database.
        """
        try:
            connection.close()
        except pymysql.Error:
            pass

    # ----------------------------------------------------------------------

    @classmethod
    def sql(cls, code=''):
        """
        Returns the statement interface for this connection.

        :param code | <str>

        :return subclass of <orb.core.backends.sql.SQLStatement>
        """
        mysql = SQL.byName('MySQL')
        return mysql.byName(code) if code else mysql
# register the mysql backend
# only advertise the MySQL connection type when the optional PyMySQL
# driver could actually be imported above
if pymysql:
    orb.Connection.registerAddon('MySQL', MySQLConnection)
"""
Defines the base MySQL class used for all MySQL based statements.
"""
import orb
from ..abstractsql import SQL
from .statements import __plugins__
class MySQL(SQL):
    """Base addon class for all MySQL flavored SQL statements."""

    @classmethod
    def createDatastore(cls):
        """
        Returns the MySQL data store class.

        :return subclass of <orb.DataStore>
        """
        factory = orb.DataStore.byName('MySQL', orb.DataStore)
        return factory()
# load the SQL plugins for MySQL
MySQL.loadStatements(__plugins__)

# ----------------------------------------------------------------------

# define custom column types for MySQL
# NOTE(review): several of these type names (BYTEA, CHARACTER VARYING,
# TIMESTAMP WITHOUT TIME ZONE, DOUBLE PRECISION, SERIAL-style columns) are
# PostgreSQL idioms that appear copied from the Postgres backend -- confirm
# they are valid for MySQL before relying on table creation here
MySQL.registerAddon('Type::Bool', u'BOOLEAN')
MySQL.registerAddon('Type::BigInt', u'BIGINT')
MySQL.registerAddon('Type::ByteArray', u'BYTEA')
MySQL.registerAddon('Type::Color', u'CHARACTER VARYING')
MySQL.registerAddon('Type::Datetime', u'TIMESTAMP WITHOUT TIME ZONE')
MySQL.registerAddon('Type::DatetimeWithTimezone', u'TIMESTAMP WITHOUT TIME ZONE')
MySQL.registerAddon('Type::Decimal', u'DECIMAL')
MySQL.registerAddon('Type::Dict', u'BYTEA')
MySQL.registerAddon('Type::Directory', u'CHARACTER VARYING')
MySQL.registerAddon('Type::Double', u'DOUBLE PRECISION')
MySQL.registerAddon('Type::Email', u'CHARACTER VARYING')
MySQL.registerAddon('Type::Enum', u'INTEGER')
MySQL.registerAddon('Type::Filepath', u'CHARACTER VARYING')
MySQL.registerAddon('Type::ForeignKey', u'BIGINT')
MySQL.registerAddon('Type::Image', u'BYTEA')
MySQL.registerAddon('Type::Integer', u'INTEGER')
MySQL.registerAddon('Type::Interval', u'TIMEDELTA')
MySQL.registerAddon('Type::Password', u'CHARACTER VARYING')
MySQL.registerAddon('Type::Pickle', u'BYTEA')
MySQL.registerAddon('Type::String', u'CHARACTER VARYING')
MySQL.registerAddon('Type::Url', u'CHARACTER VARYING')

# register the MySQLStatement addon interface to the base SQLStatement class
SQL.registerAddon('MySQL', MySQL)

# register the statements addons for MySQL
# NOTE(review): __plugins__ was already imported at the top of this module,
# so this re-import is redundant (kept to preserve behavior)
from .statements import __plugins__
MySQL.registerAddonModule(__plugins__)
# table of contents for the built (frozen) form of this package: when
# __toc__ is present, SQL.loadStatements() imports these modules and reads
# their TEMPLATE strings instead of scanning the directory for .mako files
__recurse__ = False
__toc__ = [r'orb.core.backends.sql.mysql.statements.add_column_sql_mako',
           r'orb.core.backends.sql.mysql.statements.alter_table_sql_mako',
           r'orb.core.backends.sql.mysql.statements.create_index_sql_mako',
           r'orb.core.backends.sql.mysql.statements.create_table_sql_mako',
           r'orb.core.backends.sql.mysql.statements.create_view_sql_mako',
           r'orb.core.backends.sql.mysql.statements.delete_sql_mako',
           r'orb.core.backends.sql.mysql.statements.enable_internals_sql_mako',
           r'orb.core.backends.sql.mysql.statements.insert_sql_mako',
           r'orb.core.backends.sql.mysql.statements.quote_sql_mako',
           r'orb.core.backends.sql.mysql.statements.schema_info_sql_mako',
           r'orb.core.backends.sql.mysql.statements.select_aggregate_sql_mako',
           r'orb.core.backends.sql.mysql.statements.select_count_sql_mako',
           r'orb.core.backends.sql.mysql.statements.select_expand_sql_mako',
           r'orb.core.backends.sql.mysql.statements.select_joiner_sql_mako',
           r'orb.core.backends.sql.mysql.statements.select_shortcut_sql_mako',
           r'orb.core.backends.sql.mysql.statements.select_sql_mako',
           r'orb.core.backends.sql.mysql.statements.setup_db_sql_mako',
           r'orb.core.backends.sql.mysql.statements.update_sql_mako',
           r'orb.core.backends.sql.mysql.statements.where_sql_mako']
## add_column_sql.mako -- renders one ADD COLUMN clause for a column.
## NOTE(review): SERIAL and CHARACTER VARYING are PostgreSQL idioms; this
## MySQL template looks ported from the Postgres backend -- confirm.
% if column.testFlag(column.Flags.AutoIncrement):
ADD COLUMN `${field}` SERIAL
% elif reference:
ADD COLUMN `${field}` ${type} REFERENCES `${reference}`
% elif type == 'CHARACTER VARYING' and max_length:
ADD COLUMN `${field}` CHARACTER VARYING(${max_length}) ${' '.join(flags)}
% else:
ADD COLUMN `${column.fieldName()}` ${type} ${' '.join(flags)}
% endif
## alter_table_sql.mako -- adds new base and i18n columns to an existing
## table, then (re)creates any indexes declared on the schema.
## NOTE(review): WITH (OIDS=FALSE) and OWNER TO are PostgreSQL-only syntax.
% if added['base']:
-- update the table
ALTER TABLE `${table}`
% for column in added['base'][:-1]:
    ${ADD_COLUMN(column)},
% endfor
    ${ADD_COLUMN(added['base'][-1])}
;
% endif
% if added['i18n']:
-- ensure the translation table exists (in case this is the first set of columns)
CREATE TABLE IF NOT EXISTS `${table}_i18n` (
    -- define the pkey columns
    `locale` CHARACTER VARYING(5),
    `${table}_id` BIGINT REFERENCES `${table}` (${u','.join([QUOTE(col.fieldName()) for col in added['primary']])}) ON DELETE CASCADE,
    -- define the pkey constraints
    CONSTRAINT `${table}_i18n_pkey` PRIMARY KEY (`locale`, `${table}_id`)
) WITH (OIDS=FALSE);
ALTER TABLE `${table}_i18n` OWNER TO `${owner}`;
-- add the missing columns to the translation table
ALTER TABLE `${table}_i18n`
% for column in added['i18n'][:-1]:
    ${ADD_COLUMN(column)},
% endfor
    ${ADD_COLUMN(added['i18n'][-1])}
;
% endif
## create any indexes for this new table
% for column in schema.columns():
% if column.indexed() and not column.primary():
${CREATE_INDEX(column, checkExists=True, GLOBALS=GLOBALS, IO=IO)}
% endif
% endfor
% for index in schema.indexes():
${CREATE_INDEX(index, checkExists=True, GLOBALS=GLOBALS, IO=IO)}
% endfor
## create_index_sql.mako -- builds a CREATE [UNIQUE] INDEX statement for
## either a single indexed column or a multi-column schema index.
## NOTE(review): the DO $$ block and pg_indexes catalog are PostgreSQL-only.
<%
import projex.text

columns = []
if column:
    index_name = projex.text.underscore(column.indexName().lstrip('by')) + '_idx'
    table_name = column.schema().dbname()
    unique = column.unique()
    col_table_name = table_name if not column.isTranslatable() else table_name + '_i18n'
    # case-insensitive string columns are indexed on lower(...)
    if not column.testFlag(column.Flags.CaseSensitive) and column.isString():
        columns.append('lower("{0}")'.format(column.fieldName()))
    else:
        columns.append('"{0}"'.format(column.fieldName()))
else:
    index_name = projex.text.underscore(index.name().lstrip('by')) + '_idx'
    table_name = index.schema().dbname()
    unique = index.unique()
    cols = [index.schema().column(col_name) for col_name in index.columnNames()]
    translatable = {col.isTranslatable() for col in cols}
    # can only create a DB index on multiple columns if they're on the same table
    if len(translatable) == 1:
        if list(translatable)[0]:
            table_name += '_i18n'
        for col in cols:
            if not col.testFlag(col.Flags.CaseSensitive) and col.isString():
                columns.append('lower("{0}")'.format(col.fieldName()))
            else:
                columns.append('"{0}"'.format(col.fieldName()))
%>
% if columns:
% if checkExists:
DO $$
BEGIN
IF NOT EXISTS (
    SELECT 1
    FROM pg_indexes
    WHERE schemaname = 'public'
    AND indexname = '${table_name}_${index_name}'
) THEN CREATE ${'UNIQUE' if unique else ''} INDEX ${table_name}_${index_name} ON `${table_name}` (${', '.join(columns)});
END IF;
END$$;
% else:
CREATE ${'UNIQUE' if unique else ''} INDEX ${table_name}_${index_name} ON `${table_name}` (${', '.join(columns)});
% endif
% else:
% endif
## create_table_sql.mako -- creates the base table, primary key constraint,
## and (when translatable columns exist) the companion `<table>_i18n` table.
## NOTE(review): INHERITS, WITH (OIDS=FALSE) and OWNER TO are PostgreSQL-only.
-- create the table
CREATE TABLE IF NOT EXISTS `${table}` (
    -- define the columns
% for i, column in enumerate(columns['base'] + columns['primary']):
    ${ADD_COLUMN(column).replace('ADD COLUMN ', '')},
% endfor
% if columns['primary']:
    -- define the primary key constraint
    CONSTRAINT `${table}_pkey` PRIMARY KEY (${u','.join([QUOTE(col.fieldName()) for col in columns['primary']])})
% endif
)
% if inherits:
INHERITS `${inherits}`
% endif
WITH (OIDS=FALSE);
ALTER TABLE `${table}` OWNER TO `${owner}`;
% if columns['i18n']:
-- create the translations table
CREATE TABLE `${table}_i18n` (
    -- define the columns
    `locale` CHARACTER VARYING(5),
    `${table}_id` BIGINT REFERENCES `${table}` (${u','.join([QUOTE(col.fieldName()) for col in columns['primary']])}) ON DELETE CASCADE,
% for column in columns['translations']:
    ${ADD_COLUMN(column).replace('ADD COLUMN ', '')},
% endfor
    -- define the constraints
    CONSTRAINT `${table}_i18n_pkey` PRIMARY KEY (`${table}_id`, `locale`)
)
WITH (OIDS=FALSE);
ALTER TABLE `${table}_i18n` OWNER TO `${owner}`;
% endif
## create_view_sql.mako -- builds a (MATERIALIZED) VIEW from a View schema
## whose columns are dotted shortcuts off the primary reference column.
## The populate() helper walks each shortcut, accumulating LEFT JOINs,
## GROUP BY targets and WITH-clause preloads as it goes.
## NOTE(review): DISTINCT ON, array_agg, row_to_json and MATERIALIZED VIEW
## are PostgreSQL features -- this template looks ported from the Postgres
## backend.
<%
import projex.text
from collections import OrderedDict
from orb import errors

primary = schema.primaryColumns()[0]
reference_model = primary.referenceModel()
reference_table = reference_model.schema().dbname()

preload = {}
columns = []
group_by = []
joins = OrderedDict()

# recursively resolve one dotted shortcut path; returns the SQL expression
# for the terminal field and records any joins/preloads needed on the way
def populate(schema, source, parts, alias):
    next_part = parts.pop(0)

    # look for an aggregate
    column = schema.column(next_part)

    # join in a column
    if column:
        if not column.isReference():
            return '{0}.{1}'.format(alias, QUOTE(column.fieldName()))
        else:
            ref_model = column.referenceModel()
            join_alias = QUOTE(ref_model.schema().dbname() + '_' + projex.text.underscore(column.name()))
            target = '{0}.{1}'.format(join_alias, QUOTE(ref_model.schema().primaryColumns()[0].fieldName()))
            source = '{0}.{1}'.format(alias, QUOTE(column.fieldName()))
            join = {'table': QUOTE(ref_model.schema().dbname()),
                    'alias': join_alias,
                    'on': '{0} = {1}'.format(target, source)}
            joins.setdefault(join_alias, join)
            group_by.append(target)
            if not parts:
                return target
            else:
                return populate(ref_model.schema(), target, parts, join_alias)

    # join in a lookup or pipe
    else:
        pipe = schema.pipe(next_part)
        if not pipe:
            rev_lookup = schema.reverseLookup(next_part)
            if not rev_lookup:
                raise errors.ColumnNotFound(schema.name(), next_part)

        try:
            record_part = parts.pop(0)
            invert_dir = record_part == 'last'
        except IndexError:
            raise errors.QueryInvalid('Cannot join in a record set for a View.')

        # define x here or mako will not render this template (preprocessor fails to find the
        # value within list compression)
        x = 0

        if pipe:
            join_schema = pipe.targetReferenceModel().schema()
            join_primary = join_schema.primaryColumns()[0]
            pipe_schema = pipe.pipeReferenceModel().schema()
            source_col = pipe_schema.column(pipe.sourceColumn())
            target_col = pipe_schema.column(pipe.targetColumn())
            join_field = QUOTE(source_col.fieldName())
            column_name = projex.text.underscore(pipe.name())

            order = join_schema.defaultOrder() or [(join_primary.name(), 'asc')]
            if invert_dir:
                order = [(x[0], 'asc' if x[1] == 'desc' else 'desc') for x in order]
            order = [QUOTE(join_schema.dbname(), join_schema.column(x[0]).fieldName()) + ' ' + x[1].upper()
                     for x in order]

            opts = (
                QUOTE(source_col.fieldName()),
                QUOTE(join_schema.dbname()),
                QUOTE(pipe_schema.dbname()),
                QUOTE(target_col.fieldName()),
                QUOTE(join_schema.dbname(), join_primary.fieldName()),
                ', '.join(order)
            )

            if record_part in ('first', 'last'):
                join_table = QUOTE('_'.join((join_schema.dbname(), column_name, record_part)))
                preload_as = '(SELECT DISTINCT ON (j.{0}) {1}.*, j.{0} FROM {1} ' \
                             'LEFT JOIN {2} j ON j.{3} = {4} ORDER BY j.{0}, {5})'
                preload.setdefault(join_table, preload_as.format(*opts))
            else:
                join_table = QUOTE(join_schema.dbname())
        else:
            join_schema = rev_lookup.schema()
            join_primary = join_schema.primaryColumns()[0]
            join_field = QUOTE(rev_lookup.fieldName())
            column_name = projex.text.underscore(rev_lookup.reversedName())

            order = join_schema.defaultOrder() or [(join_primary.name(), 'asc')]
            if invert_dir:
                order = [(x[0], 'asc' if x[1] == 'desc' else 'desc') for x in order]
            order = [QUOTE(join_schema.dbname(), join_schema.column(x[0]).fieldName()) + ' ' + x[1].upper()
                     for x in order]

            opts = (
                QUOTE(rev_lookup.fieldName()),
                QUOTE(join_schema.dbname()),
                QUOTE(join_primary.fieldName()),
                ', '.join(order)
            )

            if record_part in ('first', 'last'):
                join_table = QUOTE('_'.join((join_schema.dbname(), column_name, record_part)))
                preload_as = '(SELECT DISTINCT ON ({0}) {1}.* FROM {1} ORDER BY {0}, {3})'
                preload.setdefault(join_table, preload_as.format(*opts))
            else:
                join_table = QUOTE(join_schema.dbname())

        join_alias = QUOTE('_'.join((join_schema.dbname(), column_name, record_part)))
        target = '{0}.{1}'.format(join_alias, QUOTE(join_primary.fieldName()))
        join = {'table': join_table,
                'alias': join_alias,
                'on': '{0}.{1} = {2}'.format(join_alias, join_field, source)}
        joins.setdefault(join_alias, join)

        if record_part == 'count':
            return 'count({0}.*)'.format(join_alias)
        elif record_part == 'ids':
            return 'array_agg({0})'.format(target)
        elif not parts:
            group_by.append(target)
            return target
        else:
            return populate(join_schema, target, parts, join_alias)

curr_schema = reference_model.schema()
curr_field = QUOTE(reference_table, reference_model.schema().primaryColumns()[0].fieldName())
columns.append('{0} AS {1}'.format(curr_field, QUOTE(primary.fieldName())))
group_by.append(curr_field)

for column in schema.columns():
    if column.primary():
        continue
    parts = column.shortcut().split('.')
    if not (len(parts) > 1 and parts[0] == primary.name()):
        msg = 'Invalid column ({0}) on View ({1}). All columns must be shortcuts.'
        raise errors.OrbError(msg.format(column.name(), curr_schema.name()))
    field_name = populate(curr_schema, curr_field, parts[1:], QUOTE(curr_schema.dbname()))
    columns.append('{0} AS {1}'.format(field_name, column.fieldName()))
%>
DROP ${'MATERIALIZED' if schema.isStatic() else ''} VIEW IF EXISTS `${schema.dbname()}`;
CREATE ${'MATERIALIZED' if schema.isStatic() else ''} VIEW `${schema.dbname()}` AS (
% if preload:
    WITH
% for i, (preload_table, preload_as) in enumerate(preload.items()):
    ${preload_table if not i else ',' + preload_table} AS ${preload_as}
% endfor
% endif
    SELECT ${',\n '.join(columns)}
    FROM `${reference_table}`
% for join in joins.values():
    LEFT JOIN ${join['table']} AS ${join['alias']} ON ${join['on']}
% endfor
    GROUP BY ${',\n '.join(group_by)}
);
## delete_sql.mako -- deletes rows matching the optional WHERE clause.
## NOTE(review): RETURNING * is PostgreSQL syntax, not supported by MySQL.
DELETE FROM `${table}`
% if where:
WHERE ${where}
% endif
RETURNING *;
## enable_internals_sql.mako -- toggles key/constraint checking either
## globally (session variables) or for a single table.
% if not table:
SET unique_checks=${int(enabled)};
SET foreign_key_checks=${int(enabled)};
% else:
% if enabled:
ALTER TABLE `${table}` ENABLE KEYS;
% else:
ALTER TABLE `${table}` DISABLE KEYS;
% endif
% endif
## insert_sql.mako -- multi-row INSERT for the base table plus its i18n
## companion table.
## NOTE(review): RETURNING and LASTVAL() are PostgreSQL features; MySQL
## would use LAST_INSERT_ID() and no RETURNING clause -- confirm.
% if insertions['base']:
INSERT INTO `${table}` (
    ${','.join(['`{0}`'.format(column.fieldName()) for column in columns['base']])}
)
VALUES
% for row in insertions['base'][:-1]:
    (${','.join(row)}),
% endfor
    (${','.join(insertions['base'][-1])})
RETURNING id;
% endif
% if insertions['i18n']:
<% count = len(insertions['i18n']) %>
INSERT INTO `${table}_i18n` (
    `${table}_id`, `locale`, ${','.join(['`{0}`'.format(column.fieldName()) for column in columns['i18n']])}
)
VALUES
% for i, row in enumerate(insertions['i18n'][:-1]):
    (LASTVAL() - ${count - (i+1)}, %(locale)s, ${','.join(row)}),
%endfor
    (LASTVAL(), %(locale)s, ${','.join(insertions['i18n'][-1])})
RETURNING `${table}_id` AS `id`;
% endif
## quote_sql.mako -- wraps each identifier in MySQL backtick quotes and
## joins the parts with the supplied joiner (e.g. table.column).
${joiner.join([u'`{0}`'.format(t) for t in text])}
## schema_info_sql.mako -- returns, per table, the arrays of its column and
## index names (including the `<table>_i18n` companion).
## NOTE(review): array_agg, ::varchar casts, pg_indexes and ILIKE are
## PostgreSQL-only -- this will not run as-is against MySQL.
SELECT t.table_name AS name,
(
    SELECT array_agg(c.column_name::varchar)
    FROM information_schema.columns AS c
    WHERE c.table_schema = '${namespace}'
    AND c.table_name IN (t.table_name, t.table_name || '_i18n')
) AS `columns`,
(
    SELECT array_agg(i.indexname)
    FROM pg_indexes AS i
    WHERE i.schemaname = '${namespace}'
    AND i.tablename IN (t.table_name, t.table_name || '_i18n')
) AS `indexes`
FROM information_schema.tables AS t
WHERE t.table_schema = '${namespace}'
AND t.table_name NOT ILIKE '%%_i18n';
## select_aggregate_sql.mako -- renders a LEFT JOIN subquery computing a
## COUNT/SUM/MAX/MIN aggregate for a column, and registers the join alias
## and field mapping in GLOBALS for use by the enclosing SELECT.
<%
WHERE = SQL.byName('WHERE')

aggr = column.aggregator()
src_table = column.schema().dbname()
ref_col = aggr.referenceColumn()
ref_table = ref_col.schema().dbname()
targ_col = aggr.targetColumn()
query = aggr.where(column)
if query is not None:
    where = WHERE(ref_col.schema(), query, GLOBALS=GLOBALS, IO=IO)
else:
    where = None

pcols = []
for pcol in column.schema().primaryColumns():
    pcols.append('`{0}`.`{1}`'.format(src_table, pcol.fieldName()))

# create the join table name
join_count = GLOBALS.get('join_count', 0)
join_count += 1
join_table = 'join_{0}'.format(join_count)
join_col = 'SUM(COALESCE(`{0}`.`{1}`,0)) AS `{1}`'.format(join_table,
                                                          column.name())
GLOBALS['join_count'] = join_count
GLOBALS['join_table'] = join_table
GLOBALS['join_column'] = join_col
GLOBALS.setdefault('field_mapper', {})

agg_type = aggr.aggregateType()
if agg_type == orb.QueryAggregate.Type.Count:
    command = 'COUNT(*)'
elif agg_type == orb.QueryAggregate.Type.Sum:
    command = 'SUM(`{0}`)'.format(targ_col.fieldName())
elif agg_type == orb.QueryAggregate.Type.Maximum:
    command = 'MAX(`{0}`)'.format(targ_col.fieldName())
elif agg_type == orb.QueryAggregate.Type.Minimum:
    command = 'MIN(`{0}`)'.format(targ_col.fieldName())
else:
    raise orb.QueryInvalid('Invalid aggregate type.')

GLOBALS['field_mapper'][column] = '`{0}`.`{1}`'.format(join_table, column.name())
%>
LEFT JOIN (
    SELECT `${ref_col.fieldName()}`, ${command} AS `${column.name()}`
    FROM `${ref_table}`
% if where:
    WHERE ${where}
% endif
    GROUP BY `${ref_col.fieldName()}`
) AS `${join_table}`
ON `${join_table}`.`${ref_col.fieldName()}` = (${','.join(pcols)})
## select_count_sql.mako -- wraps the generated SELECT statement in a
## COUNT(*) query so the backend can return a row count.
<%
SELECT = SQL.byName('SELECT')

# only select the primary keys if we're only interested in the row count
if not lookup.columns:
    lookup.columns = [col.name() for col in table.schema().primaryColumns()]
select_command = SELECT(table, lookup=lookup, options=options, GLOBALS=GLOBALS, IO=IO).strip(';')
%>
% if select_command:
SELECT COUNT(*) AS count
FROM (
    ${select_command}
) AS records;
% endif
## select_expand_sql.mako (helper section) -- classifies each name in an
## expand tree as a column, pipe or reverseLookup on the given schema,
## recursing through the 'first'/'last' pseudo-keys.
<%
import projex.text
from orb import errors

def collect_sub_expand(schema, tree):
    output = {}
    for expand_name, sub_tree in tree.items():
        # cannot expand these keywords
        if expand_name in ('ids', 'count'):
            continue
        # these keywords are expanded via their sub-tree
        elif expand_name in ('first', 'last'):
            output.update(collect_sub_expand(schema, sub_tree))
        # lookup a column, pipe, or reverseLookup
        else:
            column = schema.column(expand_name)
            if column:
                output[('column', column)] = sub_tree
                continue
            pipe = schema.pipe(expand_name)
            if pipe:
                output[('pipe', pipe)] = sub_tree
                continue
            lookup = schema.reverseLookup(expand_name)
            if lookup:
                output[('reverseLookup', lookup)] = sub_tree
                continue
            raise errors.ColumnNotFound(schema.name(), expand_name)
    return output
%>
## SELECT_EXPAND: renders a nested sub-select that expands a pipe, a reverse
## lookup, or a reference column into an inline JSON payload on the parent
## query.
## NOTE(review): this template mixes PostgreSQL-only features (array_agg,
## row_to_json, DISTINCT ON) with MySQL backtick quoting -- confirm which
## backend it is actually rendered against.
% if pipe:
<%
    # a pipe joins source -> join-table -> target
    source_table = pipe.sourceReferenceModel()
    join_table = pipe.pipeReferenceModel()
    source_column = join_table.schema().column(pipe.sourceColumn())
    target_column = join_table.schema().column(pipe.targetColumn())
    target_table = pipe.targetReferenceModel()
    table_name = target_table.schema().dbname()
    has_translations = target_table.schema().hasTranslations()

    columns = []
    colname = pipe.name()
    col_name = projex.text.underscore(colname)
    alias = col_name + '_table'
    records_alias = col_name + '_records'
    source_table_name = source_alias or source_table.schema().dbname()

    # choose what to aggregate based on the requested expansion keywords;
    # NOTE(review): the trailing commas after two of the append calls below
    # create harmless throw-away tuples
    if 'ids' in tree:
        columns.append('array_agg({0}.id) AS ids'.format(records_alias))
    if 'count' in tree:
        columns.append('count({0}.*) AS count'.format(records_alias))
    if 'first' in tree:
        columns.append('(array_agg(row_to_json({0}.*)))[1] AS first'.format(records_alias)),
    if 'last' in tree:
        columns.append('(array_agg(row_to_json({0}.*)))[count({0}.*)] AS last'.format(records_alias)),
    if 'records' in tree or not columns:
        columns.append('array_agg(row_to_json({0}.*)) AS records'.format(records_alias))
%>
(
    SELECT row_to_json(${col_name}_row) FROM (
        SELECT ${', '.join(columns)}
        FROM (
            % if not has_translations:
            SELECT ${alias}.*
            % else:
            SELECT ${alias}.*, ${alias}_i18n.*
            % endif
            ## recursively expand any nested columns/pipes/lookups
            % for (type, object), sub_tree in collect_sub_expand(target_table.schema(), tree).items():
            ,${SELECT_EXPAND(**{type: object, 'tree': sub_tree, 'source_alias': alias, 'GLOBALS': GLOBALS, 'IO': IO, 'options': options})}
            % endfor
            FROM `${table_name}` AS `${alias}`
            % if has_translations:
            LEFT JOIN `${table_name}_i18n` AS `${alias}_i18n` ON ${alias}.id = ${alias}_i18n.${table_name}_id AND ${alias}_i18n.locale = '${options.locale}'
            % endif
            WHERE `${alias}`.id IN (
                SELECT DISTINCT ON (j.`${target_column.fieldName()}`) j.`${target_column.fieldName()}`
                FROM `${join_table.schema().dbname()}` AS j
                WHERE j.`${source_column.fieldName()}` = `${source_table_name}`.id
                ${'LIMIT 1' if pipe.unique() else ''}
            )
        ) ${records_alias}
    ) ${col_name}_row
) AS `${colname}`
% elif reverseLookup:
<%
    # a reverse lookup expands records that point back at the parent table
    source_schema = reverseLookup.schema()
    source_column = reverseLookup
    table_name = source_schema.dbname()
    ref_schema = reverseLookup.referenceModel().schema()
    ref_table_name = source_alias or ref_schema.dbname()
    has_translations = source_schema.hasTranslations()

    colname = reverseLookup.reversedName()
    col_name = projex.text.underscore(colname)
    alias = col_name + '_table'
    records_alias = col_name + '_records'

    # same keyword handling as the pipe branch above
    columns = []
    if 'ids' in tree:
        columns.append('array_agg({0}.id) AS ids'.format(records_alias))
    if 'count' in tree:
        columns.append('count({0}.*) AS count'.format(records_alias))
    if 'first' in tree:
        columns.append('(array_agg(row_to_json({0}.*)))[1] AS first'.format(records_alias)),
    if 'last' in tree:
        columns.append('(array_agg(row_to_json({0}.*)))[count({0}.*)] AS last'.format(records_alias)),
    if 'records' in tree or not columns:
        columns.append('array_agg(row_to_json({0}.*)) AS records'.format(records_alias))
%>
(
    SELECT row_to_json(${col_name}_row) FROM (
        SELECT ${', '.join(columns)}
        FROM (
            % if not has_translations:
            SELECT ${alias}.*
            % else:
            SELECT ${alias}.*, ${alias}_i18n.*
            % endif
            ## recursively expand any nested columns/pipes/lookups
            % for (type, object), sub_tree in collect_sub_expand(source_schema, tree).items():
            ,${SELECT_EXPAND(**{type: object, 'tree': sub_tree, 'source_alias': alias, 'GLOBALS': GLOBALS, 'IO': IO, 'options': options})}
            % endfor
            FROM `${table_name}` AS `${alias}`
            % if has_translations:
            LEFT JOIN `${table_name}_i18n` AS `${alias}_i18n` ON ${alias}.id = ${alias}_i18n.${table_name}_id AND ${alias}_i18n.locale = '${options.locale}'
            % endif
            WHERE `${alias}`.`${source_column.fieldName()}` = `${ref_table_name}`.id
            ${'LIMIT 1' if source_column.unique() else ''}
        ) ${records_alias}
    ) ${col_name}_row
) AS `${colname}`
% elif column:
<%
    # a plain reference column expands its single referenced record
    reference = column.referenceModel()
    ref_table_name = source_alias or column.schema().dbname()
    colname = column.name()
    table_name = reference.schema().dbname()
    col_name = projex.text.underscore(colname)
    alias = projex.text.underscore(column.name()) + '_table'
    has_translations = reference.schema().hasTranslations()

    # build the JSON object by hand via CONCAT over each referenced field
    col_concats = []
    for ref_col in reference.schema().columns():
        col_concats.append("CONCAT('\"{0}\":', '\"', {1}_row.`{0}`, '\"')".format(ref_col.fieldName(), col_name))
%>
(
    SELECT CONCAT('{',
        CONCAT_WS(',', ${','.join(col_concats)}),
        '}') FROM (
        % if not has_translations:
        SELECT `${alias}`.*
        % else:
        SELECT `${alias}`.*, `${alias}_i18n`.*
        % endif
        ## recursively expand any nested columns/pipes/lookups
        % for (type, object), sub_tree in collect_sub_expand(reference.schema(), tree).items():
        ,${SELECT_EXPAND(**{type: object, 'tree': sub_tree, 'source_alias': alias, 'GLOBALS': GLOBALS, 'IO': IO, 'options': options})}
        % endfor
        FROM `${table_name}` AS `${alias}`
        % if has_translations:
        LEFT JOIN `${table_name}_i18n` AS `${alias}_i18n` ON `${alias}`.id = `${alias}_i18n`.${table_name}_id AND ${alias}_i18n.locale = '${options.locale}'
        % endif
        WHERE `${alias}`.id = `${ref_table_name}`.`${column.fieldName()}`
    ) ${col_name}_row
) AS `${colname}`
% endif
## SELECT_JOINER: generates a LEFT JOIN sub-select that pulls a joined
## column's value onto the base table via the column's joiner definition.
<%
    WHERE = SQL.byName('WHERE')

    joiner = column.joiner()
    src_table = column.schema().dbname()
    ref_col = joiner.referenceColumn()
    ref_table = ref_col.schema().dbname()
    targ_col = joiner.targetColumn()

    # optional filter for the joined records
    query = joiner.where(column)
    if query is not None:
        where = WHERE(ref_col.schema(), query, GLOBALS=GLOBALS, IO=IO)
    else:
        where = None

    # quote the primary key columns with backticks for consistency with the
    # rest of this template (the ON clause below and the parallel aggregate
    # joiner both use backticks; the double-quote form would not parse on a
    # backtick-quoting backend)
    pcols = []
    for pcol in column.schema().primaryColumns():
        pcols.append('`{0}`.`{1}`'.format(src_table, pcol.fieldName()))

    # create the join table name
    join_count = GLOBALS.get('join_count', 0)
    join_count += 1
    join_table = 'join_{0}'.format(join_count)

    # NOTE(review): (ARRAY_AGG(..))[1] is PostgreSQL syntax -- confirm this
    # template is only rendered against a Postgres-compatible backend
    join_col = '(ARRAY_AGG("{0}"."{1}"))[1] AS "{2}"'.format(join_table,
                                                            column.name(),
                                                            column.name())

    GLOBALS.setdefault('field_mapper', {})
    GLOBALS['join_count'] = join_count
    GLOBALS['join_table'] = join_table
    GLOBALS['join_column'] = join_col
    GLOBALS['field_mapper'][column] = '`{0}`.`{1}`'.format(join_table,
                                                           column.name())
%>
LEFT JOIN (
    SELECT DISTINCT ON (`${ref_col.fieldName()}`)
        `${ref_col.fieldName()}`, `${targ_col.fieldName()}` AS `${column.name()}`
    FROM `${ref_table}`
    % if where:
    WHERE ${where}
    % endif
) AS `${join_table}`
    ON `${join_table}`.`${ref_col.fieldName()}` = (${','.join(pcols)})
## SELECT: renders the full SELECT statement for a table lookup, including
## aggregate/joined columns, translation (i18n) handling, expansion of
## references/pipes/reverse-lookups, ordering, grouping, and paging.
<%
    SELECT_AGGREGATE = SQL.byName('SELECT_AGGREGATE')
    SELECT_JOINER = SQL.byName('SELECT_JOINER')
    SELECT_EXPAND = SQL.byName('SELECT_EXPAND')
    SELECT_SHORTCUT = SQL.byName('SELECT_SHORTCUT')
    WHERE = SQL.byName('WHERE')

    ID = orb.system.settings().primaryField()

    # reset the column -> SQL-field mapping for this statement
    GLOBALS['field_mapper'] = {}

    schema = table.schema()
    table_name = schema.dbname()

    # sort plain columns first, then joined, then aggregates, so that the
    # join/aggregate sub-selects are generated after the base fields
    def cmpcol(a, b):
        result = cmp(a.isAggregate(), b.isAggregate())
        if not result:
            result = cmp(a.isJoined(), b.isJoined())
            if not result:
                return cmp(a.fieldName(), b.fieldName())
        return result

    pcols = [QUOTE(table_name, pcol.fieldName()) for pcol in schema.primaryColumns()]

    expand_tree = lookup.expandtree()
    expanded = bool(expand_tree)

    joined = []
    columns = []
    i18n_columns = []
    group_by = set()

    # columns referenced by the WHERE clause must still be joined in even
    # when they are not part of the selection
    if lookup.where:
        query_columns = lookup.where.columns(schema)
    else:
        query_columns = []

    for column in sorted(schema.columns(), cmpcol):
        if lookup.columns and \
           not (column.name() in lookup.columns or
                column.fieldName() in lookup.columns or
                column in lookup.columns):
            use_column = False
        else:
            use_column = True

        if column.isAggregate():
            if use_column or column in query_columns:
                aggr_sql = SELECT_AGGREGATE(column, GLOBALS=GLOBALS, IO=IO)
                group_by.update(pcols)
                joined.append(aggr_sql)
                if use_column:
                    columns.append(GLOBALS['join_column'])
        elif column.isJoined():
            if use_column or column in query_columns:
                aggr_sql = SELECT_JOINER(column, GLOBALS=GLOBALS, IO=IO)
                group_by.update(pcols)
                joined.append(aggr_sql)
                if use_column:
                    columns.append(GLOBALS['join_column'])
        elif use_column and column.shortcut() and not isinstance(column.schema(), orb.ViewSchema):
            raise NotImplementedError('Shortcuts are not supported in PostgreSQL yet.')
        elif use_column and column.isTranslatable():
            if options.inflated or options.locale == 'all':
                # process translation logic: collect every locale's value
                # into a single hstore per record
                col_sql = 'hstore_agg(hstore(`i18n`.`locale`, `i18n`.`{0}`)) AS `{1}`'
                i18n_columns.append(col_sql.format(column.fieldName(), column.fieldName()))
                group_by.add('`{0}`.`{1}`'.format(table_name, ID))
                GLOBALS['field_mapper'][column] = '`i18n`.`{0}`'.format(column.fieldName())
            else:
                # single-locale: pick the (only) joined translation row
                col_sql = '(array_agg(`i18n`.`{0}`))[1] AS `{1}`'
                i18n_columns.append(col_sql.format(column.fieldName(), column.fieldName()))
                group_by.add('`{0}`.`{1}`'.format(table_name, ID))
                IO['locale'] = options.locale
                GLOBALS['field_mapper'][column] = '`i18n`.`{0}`'.format(column.fieldName())
        elif not column.isProxy() and use_column:
            query_columns.append(column)

            # expand a reference column
            if column.isReference() and column.name() in expand_tree:
                tree = expand_tree.pop(column.name())
                col_sql = SELECT_EXPAND(column=column, lookup=lookup, options=options, tree=tree, GLOBALS=GLOBALS, IO=IO)
                if col_sql:
                    columns.append(col_sql)

            # or, just return the base record
            columns.append('`{0}`.`{1}` AS `{2}`'.format(table_name,
                                                         column.fieldName(),
                                                         column.fieldName()))

    # include any additional expansions from pipes or reverse lookups
    if expand_tree:
        # include pipes
        for pipe in schema.pipes():
            name = pipe.name()
            tree = expand_tree.pop(name, None)
            if tree is not None:
                col_sql = SELECT_EXPAND(pipe=pipe, lookup=lookup, options=options, tree=tree, GLOBALS=GLOBALS, IO=IO)
                if col_sql:
                    columns.append(col_sql)
            if not expand_tree:
                break

        # include reverse lookups
        if expand_tree:
            for reverseLookup in schema.reverseLookups():
                name = reverseLookup.reversedName()
                tree = expand_tree.pop(name, None)
                if tree is not None:
                    col_sql = SELECT_EXPAND(reverseLookup=reverseLookup, lookup=lookup, options=options, tree=tree, GLOBALS=GLOBALS, IO=IO)
                    if col_sql:
                        columns.append(col_sql)
                if not expand_tree:
                    break

    # QueryIsNull sentinels an always-empty query: skip rendering entirely
    if lookup.where:
        try:
            where = WHERE(schema, lookup.where, GLOBALS=GLOBALS, IO=IO)
        except orb.errors.QueryIsNull:
            where = orb.errors.QueryIsNull
    else:
        where = ''

    if lookup.order:
        # NOTE(review): 'used' is assigned but never read below
        used = set()
        order_by = []
        for col, direction in lookup.order:
            col_obj = schema.column(col)
            if not col_obj:
                continue
            default = '`{0}`.`{1}`'.format(table_name, col_obj.fieldName())
            field = GLOBALS['field_mapper'].get(col_obj, default)
            # ordering on a joined/aggregated field requires grouping on it
            if field != default:
                group_by.add(field)
            order_by.append('{0} {1}'.format(field, direction.upper()))
    else:
        order_by = []
%>
% if (columns or i18n_columns) and where != orb.errors.QueryIsNull:
SELECT ${'DISTINCT' if lookup.distinct else ''}
${',\n '.join(columns+i18n_columns)}
FROM `${table_name}`
${'\n'.join(joined) if joined else ''}
% if i18n_columns:
% if options.inflated or options.locale == 'all':
LEFT JOIN `${table_name}_i18n` AS `i18n` ON (
    `i18n`.`${table_name}_id` = `${ID}`
)
% else:
LEFT JOIN `${table_name}_i18n` AS `i18n` ON (
    `i18n`.`${table_name}_id` = `${table_name}`.`${ID}` AND `i18n`.`locale` = %(locale)s
)
% endif
% endif
% if expanded:
## when expanding, restrict via an inner id sub-select so paging/ordering
## applies to base records rather than the expanded joins
% if where or order_by or lookup.start or lookup.limit:
WHERE `${table_name}`.`id` IN (
    SELECT DISTINCT ${'ON ({0}) '.format(', '.join([col.split(' ')[0] for col in order_by])) if order_by else ''}`${table_name}`.`id`
    FROM `${table_name}`
    % if i18n_columns:
    % if options.inflated or options.locale == 'all':
    LEFT JOIN `${table_name}_i18n` AS `i18n` ON (
        `i18n`.`${table_name}_id` = `${ID}`
    )
    % else:
    LEFT JOIN `${table_name}_i18n` AS `i18n` ON (
        `i18n`.`${table_name}_id` = `${table_name}`.`${ID}` AND `i18n`.`locale` = %(locale)s
    )
    % endif
    % endif
    % if where:
    WHERE ${where}
    % endif
    % if group_by:
    GROUP BY ${', '.join(list(group_by) + [col.split(' ')[0] for col in order_by])}
    % endif
    % if order_by:
    ORDER BY ${', '.join(order_by)}
    % endif
    % if lookup.start:
    OFFSET ${lookup.start}
    % endif
    % if lookup.limit > 0:
    LIMIT ${lookup.limit}
    % endif
)
% endif
% if group_by:
GROUP BY ${', '.join(group_by)}
% endif
% else:
% if where:
WHERE ${where}
% endif
% if group_by:
GROUP BY ${', '.join(group_by)}
% endif
% if order_by:
ORDER BY ${', '.join(order_by)}
% endif
% if lookup.start:
OFFSET ${lookup.start}
% endif
% if lookup.limit > 0:
LIMIT ${lookup.limit}
% endif
% endif
;
% endif
-- ensure hstore is installed
CREATE EXTENSION IF NOT EXISTS hstore;

-- define the hstore_agg aggregate (merges per-locale hstores during GROUP BY)
DROP AGGREGATE IF EXISTS hstore_agg(hstore);
CREATE AGGREGATE hstore_agg(hstore) (
    sfunc=hs_concat,
    stype=hstore
);

-- define the array_sort method
CREATE OR REPLACE FUNCTION array_sort (ANYARRAY)
RETURNS ANYARRAY LANGUAGE SQL
AS $$
SELECT ARRAY(
    -- plain alias: backtick quoting is MySQL syntax and is not valid in
    -- PostgreSQL, which this script targets (CREATE EXTENSION / AGGREGATE)
    SELECT $1[s.i] AS foo
    FROM
        generate_series(array_lower($1,1), array_upper($1,1)) AS s(i)
    ORDER BY foo
);
$$;
## UPDATE: renders UPDATE statements for each changed record, writing plain
## columns to the base table and translatable columns to the companion
## "<table>_i18n" table via an upsert-style DO block.
## NOTE(review): backtick quoting combined with a Postgres DO $$ block is a
## MySQL/Postgres hybrid -- confirm the intended backend.
<%
    table_name = schema.dbname()
    pcols = ['`{0}`'.format(pcol.fieldName()) for pcol in schema.primaryColumns()]
%>
% for record, columns in changes:
<%
    updates = []
    translation_updates = {}
    translation_columns = {}
    translation_values = {}

    # bind the record's primary key as the next positional IO parameter
    pkey = str(len(IO))
    IO[pkey] = record.primaryKey()

    for column in columns:
        if column.isTranslatable():
            # one UPDATE/INSERT per locale for translated values
            for locale, value in record.recordValue(column.name(), locale='all').items():
                key = str(len(IO))
                IO[key] = SQL.datastore().store(column, value)

                translation_updates.setdefault(locale, [])
                translation_columns.setdefault(locale, [])
                translation_values.setdefault(locale, [])

                translation_updates[locale].append('`{0}` = %({1})s'.format(column.fieldName(), key))
                translation_columns[locale].append('`{0}`'.format(column.fieldName()))
                translation_values[locale].append('%({0})s'.format(key))
        else:
            key = str(len(IO))
            IO[key] = SQL.datastore().store(column,
                                            record.recordValue(column.name()))
            updates.append('`{0}` = %({1})s'.format(column.fieldName(), key))
%>
% if updates:
UPDATE `${table_name}`
SET ${',\n '.join(updates)}
WHERE (${','.join(pcols)}) = %(${pkey})s;
% endif
% if translation_updates:
% for locale in translation_updates:
## upsert the translation row: INSERT when the (record, locale) pair does
## not exist yet, otherwise UPDATE it in place
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1
        FROM `${table_name}_i18n`
        WHERE `${table_name}_id` = %(${pkey})s AND `locale` = '${locale}'
    )
    THEN
        INSERT INTO `${table_name}_i18n` (`${table_name}_id`, `locale`, ${', '.join(translation_columns[locale])})
        VALUES (%(${pkey})s, '${locale}', ${', '.join(translation_values[locale])});
    ELSE
        UPDATE `${table_name}_i18n`
        SET ${',\n '.join(translation_updates[locale])}
        WHERE `${table_name}_id` = %(${pkey})s AND `locale` = '${locale}';
    END IF;
END$$;
% endfor
% endif
% endfor
## WHERE leaf: renders a single column comparison for a query, handling
## column-to-column comparisons, NULL tests, record-set sub-selects, and
## plain literal values (with wildcard and case-insensitive handling).
% if orb.Query.typecheck(value):
## the value is itself a query: compare field against field
<%
    val_schema = value.table().schema()
    val_col = value.column()
    query_field = QUOTE(val_schema.dbname(), val_col.fieldName())
%>
% if query.isInverted():
${query_field} ${op} ${field}
% else:
${field} ${op} ${query_field}
% endif
% elif value is None:
% if operator == orb.Query.Op.Is:
${field} IS NULL
% elif operator == orb.Query.Op.IsNot:
${field} IS NOT NULL
% endif
% elif orb.RecordSet.typecheck(value):
## the value is a record set: inline it as a sub-select of its ids
<% SELECT = SQL.byName('SELECT') %>
${field} ${op} (
    ${SELECT(value, default_columns=['id'], IO=IO).strip(';')}
)
% else:
<%
    # an empty IN ()/NOT IN () can never match -- signal the caller to
    # drop this clause entirely
    if operator in (orb.Query.Op.IsIn, orb.Query.Op.IsNotIn) and not value:
        raise orb.errors.QueryIsNull()

    ID = orb.system.settings().primaryField()

    key = str(len(IO))
    key_id = '%({0})s'.format(key)

    # wrap the stored value with SQL wildcards for the string operators
    if query.operatorType() in (query.Op.Contains, query.Op.DoesNotContain):
        store_value = '%{0}%'.format(value)
    elif query.operatorType() in (query.Op.Startswith, query.Op.DoesNotStartwith):
        store_value = '{0}%'.format(value)
    elif query.operatorType() in (query.Op.Endswith, query.Op.DoesNotEndwith):
        store_value = '%{0}'.format(value)
    else:
        store_value = value

    IO[key] = SQL.datastore().store(column, store_value)

    # emulate ILIKE by lower()-ing both sides of a plain LIKE
    op_sql = op
    field_sql = field
    if 'ILIKE' in op:
        op_sql = op_sql.replace('ILIKE', 'LIKE')
        key_id = 'lower({0})'.format(key_id)
        field_sql = 'lower({0})'.format(field_sql)
%>
% if column.isTranslatable():
## check to see if this column has already been merged
% if column in GLOBALS['field_mapper']:
${field_sql} ${op_sql} %(${key})s
% else:
<% table_name = column.schema().dbname() %>
## quote consistently with backticks -- the original mixed double quotes
## and backticks here, emitting unbalanced identifiers
`${table_name}`.`${ID}` IN (
    SELECT `${table_name}_id`
    FROM `${table_name}_i18n`
    % if query.isInverted():
    WHERE %(${key})s ${op_sql} `${table_name}_i18n`.`${column.fieldName()}`
    % else:
    ## use op_sql (not op) so the ILIKE -> LIKE rewrite applies to this
    ## branch exactly like the inverted branch above
    WHERE `${table_name}_i18n`.`${column.fieldName()}` ${op_sql} %(${key})s
    % endif
)
% endif
% elif query.isInverted():
${key_id} ${op_sql} ${field_sql}
% else:
${field_sql} ${op_sql} ${key_id}
% endif
% endif
"""
Defines the data store logic for MySQL databases.
"""
from orb import DataStore
class MySQLDataStore(DataStore):
pass
DataStore.registerAddon('MySQL', MySQLDataStore)
""" Defines the backend connection class for PostgreSQL databases. """
from .connection import PSQLConnection
from .sql import PSQL
from .store import PSQLDataStore
""" Defines the backend connection class for PostgreSQL databases. """
import datetime
import logging
import os
import orb
import re
import traceback
from orb import errors
from projex.text import nativestring as nstr
from ..abstractconnection import SQLConnection
from ..abstractsql import SQL
log = logging.getLogger(__name__)
# psycopg2 is an optional dependency: when it is missing this module still
# imports cleanly, but the Postgres backend is never registered (the
# registration at the bottom of the file is guarded by `if pg:`)
try:
    import psycopg2 as pg
    from psycopg2.extras import DictCursor, register_hstore, register_json
    from psycopg2.extensions import QueryCanceledError
except ImportError:
    log.debug('For PostgreSQL backend, download the psycopg2 module')

    # fall back to stand-ins so the rest of the module can reference them
    QueryCanceledError = errors.DatabaseError
    DictCursor = None
    register_hstore = None
    register_json = None
    pg = None
# ----------------------------------------------------------------------

# noinspection PyAbstractClass
class PSQLConnection(SQLConnection):
    """
    Creates a PostgreSQL backend connection type for handling database
    connections to PostgreSQL databases.
    """

    # ----------------------------------------------------------------------
    #                           PROTECTED METHODS
    # ----------------------------------------------------------------------
    def _execute(self, command, data=None, autoCommit=True, autoClose=True,
                 returning=True, mapper=dict):
        """
        Executes the inputted command into the current \
        connection cursor.

        :param      command    | <str>
                    data       | <dict> || None
                    autoCommit | <bool> | commit database changes immediately
                    autoClose  | <bool> | closes connections immediately

        :return     [{<str> key: <variant>, ..}, ..], <int> count
        """
        rowcount = 0
        if data is None:
            data = {}

        # when in debug mode, simply log the command to the log
        # (checked unconditionally -- the previous `elif` chained this to
        # the data check above, so blocked commands still executed whenever
        # no data was supplied)
        if self.database().commandsBlocked():
            log.info(command)
            return [], rowcount

        # create a new cursor for this transaction
        db = self.nativeConnection()
        if db is None:
            raise errors.ConnectionLost()

        # check to make sure the connection hasn't been reset or lost;
        # attempt a single forced re-open before giving up
        try:
            cursor = db.cursor(cursor_factory=DictCursor)
        except pg.InterfaceError as err:
            if self.open(force=True):
                cursor = db.cursor(cursor_factory=DictCursor)
            else:
                raise err

        # register the hstore option
        try:
            register_hstore(cursor, unicode=True)
        except pg.ProgrammingError:
            log.warning('HSTORE is not supported in this version of Postgres!')

        # register the json option
        try:
            register_json(cursor)
        except pg.ProgrammingError:
            log.warning('JSON is not supported in this version of Postgres!')

        start = datetime.datetime.now()

        try:
            cursor.execute(command, data)
            rowcount = cursor.rowcount

        # look for a cancelled query
        except QueryCanceledError as cancelled:
            try:
                db.rollback()
            except StandardError as err:
                log.error('Rollback error: {0}'.format(err))
            log.critical(command)
            if data:
                log.critical(str(data))

            # raise more useful errors
            if 'statement timeout' in str(cancelled):
                raise errors.QueryTimeout(command,
                                          (datetime.datetime.now() - start).total_seconds())
            else:
                raise errors.Interruption()

        # look for a disconnection error
        except pg.InterfaceError:
            raise errors.ConnectionLost()

        # look for integrity errors
        except (pg.IntegrityError, pg.OperationalError) as err:
            try:
                db.rollback()
            except StandardError:
                pass

            # look for a duplicate error
            duplicate_error = re.search(r'Key (.*) already exists.', nstr(err))
            if duplicate_error:
                key = duplicate_error.group(1)
                result = re.match(r'^\(lower\((?P<column>[^\)]+)::text\)\)=\((?P<value>[^\)]+)\)$', key)
                if not result:
                    result = re.match(r'^(?P<column>\w+)=(?P<value>\w+)', key)

                if result:
                    msg = '{value} is already being used.'.format(**result.groupdict())
                    raise errors.DuplicateEntryFound(msg)
                else:
                    raise errors.DuplicateEntryFound(duplicate_error.group())

            # look for a reference error
            reference_error = re.search('Key .* is still referenced from table ".*"', nstr(err))
            if reference_error:
                msg = 'Cannot remove this record, it is still being referenced.'
                raise errors.CannotDelete(msg)

            # unknown error -- format_exc() returns the traceback text;
            # print_exc() prints to stderr and returns None, so the old
            # code logged the literal string 'None'
            log.debug(traceback.format_exc())
            raise errors.QueryFailed(command, data, nstr(err))

        # connection has closed underneath the hood
        except pg.Error as err:
            try:
                db.rollback()
            except StandardError:
                pass

            log.error(traceback.format_exc())
            raise errors.QueryFailed(command, data, nstr(err))

        # a statement with no result set (e.g. UPDATE) raises here
        try:
            results = [mapper(record) for record in cursor.fetchall()]
        except pg.ProgrammingError:
            results = []

        if autoCommit:
            self.commit()
        if autoClose:
            cursor.close()

        return results, rowcount

    def _open(self, db):
        """
        Handles simple, SQL specific connection creation.  This will not
        have to manage thread information as it is already managed within
        the main open method for the SQLBase class.

        :param      db | <orb.Database>

        :return     <variant> | backend specific database connection
        """
        if not pg:
            raise errors.BackendNotFound('psycopg2 is not installed.')

        dbname = db.databaseName()
        user = db.username()
        pword = db.password()

        host = db.host()
        if not host:
            host = 'localhost'

        port = db.port()
        if not port:
            port = 5432

        # statement_timeout makes long-running queries raise
        # QueryCanceledError instead of hanging the connection
        os.environ['PGOPTIONS'] = '-c statement_timeout={0}'.format(db.maximumTimeout())

        # create the python connection
        try:
            return pg.connect(database=dbname,
                              user=user,
                              password=pword,
                              host=host,
                              port=port)
        except pg.OperationalError as err:
            log.error(err)
            raise errors.ConnectionFailed('Failed to connect to Postgres', db)

    def _interrupt(self, threadId, connection):
        """
        Interrupts the given native connection from a separate thread.

        :param      threadId   | <int>
                    connection | <variant> | backend specific database.
        """
        try:
            connection.cancel()
        except pg.Error:
            pass

    # ----------------------------------------------------------------------

    @classmethod
    def sql(cls, code=''):
        """
        Returns the statement interface for this connection.

        :param      code | <str>

        :return     subclass of <orb.core.backends.sql.SQLStatement>
        """
        if code:
            return SQL.byName('Postgres').byName(code)
        else:
            return SQL.byName('Postgres')


# register the postgres backend only when psycopg2 actually imported
if pg:
    orb.Connection.registerAddon('Postgres', PSQLConnection)
"""
Defines the base PSQL class used for all Postgres based statements.
"""
import orb
from ..abstractsql import SQL
from .statements import __plugins__
class PSQL(SQL):
    @classmethod
    def createDatastore(cls):
        """
        Creates and returns a new instance of the Postgres-specific
        data store.

        :return     subclass of <orb.DataStore>
        """
        return orb.DataStore.byName('Postgres', orb.DataStore)()
# load the SQL plugins for PostgreSQL
PSQL.loadStatements(__plugins__)

# ----------------------------------------------------------------------

# define custom column types for PostgreSQL (orb type name -> SQL type used
# when rendering DDL)
PSQL.registerAddon('Type::Bool', u'BOOLEAN')
PSQL.registerAddon('Type::BigInt', u'BIGINT')
PSQL.registerAddon('Type::ByteArray', u'BYTEA')
PSQL.registerAddon('Type::Color', u'CHARACTER VARYING')
PSQL.registerAddon('Type::Datetime', u'TIMESTAMP WITHOUT TIME ZONE')
PSQL.registerAddon('Type::DatetimeWithTimezone', u'TIMESTAMP WITHOUT TIME ZONE')
PSQL.registerAddon('Type::Decimal', u'DECIMAL')
PSQL.registerAddon('Type::Dict', u'BYTEA')
PSQL.registerAddon('Type::Directory', u'CHARACTER VARYING')
PSQL.registerAddon('Type::Double', u'DOUBLE PRECISION')
PSQL.registerAddon('Type::Email', u'CHARACTER VARYING')
PSQL.registerAddon('Type::Enum', u'INTEGER')
PSQL.registerAddon('Type::Filepath', u'CHARACTER VARYING')
PSQL.registerAddon('Type::ForeignKey', u'BIGINT')
PSQL.registerAddon('Type::Image', u'BYTEA')
PSQL.registerAddon('Type::Integer', u'INTEGER')
# PostgreSQL's duration type is INTERVAL; 'TIMEDELTA' is not a valid
# PostgreSQL data type and would fail in rendered DDL
PSQL.registerAddon('Type::Interval', u'INTERVAL')
PSQL.registerAddon('Type::Password', u'CHARACTER VARYING')
PSQL.registerAddon('Type::Pickle', u'BYTEA')
PSQL.registerAddon('Type::String', u'CHARACTER VARYING')
PSQL.registerAddon('Type::Url', u'CHARACTER VARYING')

# register the PSQLStatement addon interface to the base SQLStatement class
SQL.registerAddon('Postgres', PSQL)

# register the statements addons for PSQL
# NOTE(review): the plugins were already loaded via loadStatements() above --
# confirm both registration paths are intentional
from .statements import __plugins__
PSQL.registerAddonModule(__plugins__)
## ADD_COLUMN: renders the column clause used by both the ALTER TABLE and
## CREATE TABLE statements for a single schema column.
% if column.testFlag(column.Flags.AutoIncrement):
    ADD COLUMN "${field}" SERIAL
% elif reference:
    ADD COLUMN "${field}" ${type} REFERENCES "${reference}"
% elif type == 'CHARACTER VARYING' and max_length:
    ADD COLUMN "${field}" CHARACTER VARYING(${max_length}) ${' '.join(flags)}
% else:
    ## use the same pre-computed ${field} name as every other branch
    ## (this branch previously re-derived it via column.fieldName())
    ADD COLUMN "${field}" ${type} ${' '.join(flags)}
% endif
## ALTER TABLE: adds newly-defined columns to an existing table and, when
## translatable columns were added, to its companion "<table>_i18n" table,
## then (re)creates any indexes declared on the schema.
% if added['base']:
-- update the table
ALTER TABLE "${table}"
% for column in added['base'][:-1]:
    ${ADD_COLUMN(column)},
% endfor
    ${ADD_COLUMN(added['base'][-1])}
;
% endif
% if added['i18n']:
-- ensure the translation table exists (in case this is the first set of columns)
CREATE TABLE IF NOT EXISTS "${table}_i18n" (
    -- define the pkey columns
    "locale" CHARACTER VARYING(5),
    "${table}_id" BIGINT REFERENCES "${table}" (${u','.join([QUOTE(col.fieldName()) for col in added['primary']])}) ON DELETE CASCADE,

    -- define the pkey constraints
    CONSTRAINT "${table}_i18n_pkey" PRIMARY KEY ("locale", "${table}_id")
) WITH (OIDS=FALSE);
ALTER TABLE "${table}_i18n" OWNER TO "${owner}";

-- add the missing columns to the translation table
ALTER TABLE "${table}_i18n"
% for column in added['i18n'][:-1]:
    ${ADD_COLUMN(column)},
% endfor
    ${ADD_COLUMN(added['i18n'][-1])}
;
% endif
## create any indexes for this new table
% for column in schema.columns():
% if column.indexed() and not column.primary():
${CREATE_INDEX(column, checkExists=True, GLOBALS=GLOBALS, IO=IO)}
% endif
% endfor
% for index in schema.indexes():
${CREATE_INDEX(index, checkExists=True, GLOBALS=GLOBALS, IO=IO)}
% endfor
## CREATE_INDEX: renders an (optionally unique) index for either a single
## column or a multi-column index definition.
<%
    import projex.text

    def _strip_by_prefix(name):
        # strip a literal leading 'by' naming prefix; the old
        # name.lstrip('by') removed a *character set*, not a prefix, so
        # e.g. 'byyear' was mangled to 'ear'
        return name[2:] if name.startswith('by') else name

    columns = []
    if column:
        index_name = projex.text.underscore(_strip_by_prefix(column.indexName())) + '_idx'
        table_name = column.schema().dbname()
        unique = column.unique()

        # NOTE(review): col_table_name is computed but never used below --
        # translatable columns live on the i18n table, yet the index is
        # created on table_name; confirm intended behavior
        col_table_name = table_name if not column.isTranslatable() else table_name + '_i18n'

        # case-insensitive string columns index the lower-cased value
        if not column.testFlag(column.Flags.CaseSensitive) and column.isString():
            columns.append('lower("{0}")'.format(column.fieldName()))
        else:
            columns.append('"{0}"'.format(column.fieldName()))
    else:
        index_name = projex.text.underscore(_strip_by_prefix(index.name())) + '_idx'
        table_name = index.schema().dbname()
        unique = index.unique()

        cols = [index.schema().column(col_name) for col_name in index.columnNames()]
        translatable = {col.isTranslatable() for col in cols}

        # can only create a DB index on multiple columns if they're on the same table
        if len(translatable) == 1:
            if list(translatable)[0]:
                table_name += '_i18n'
            for col in cols:
                if not col.testFlag(col.Flags.CaseSensitive) and col.isString():
                    columns.append('lower("{0}")'.format(col.fieldName()))
                else:
                    columns.append('"{0}"'.format(col.fieldName()))
%>
% if columns:
% if checkExists:
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1
        FROM pg_indexes
        WHERE schemaname = 'public'
        AND indexname = '${table_name}_${index_name}'
    ) THEN CREATE ${'UNIQUE' if unique else ''} INDEX ${table_name}_${index_name} ON "${table_name}" (${', '.join(columns)});
    END IF;
END$$;
% else:
CREATE ${'UNIQUE' if unique else ''} INDEX ${table_name}_${index_name} ON "${table_name}" (${', '.join(columns)});
% endif
% else:
% endif
## CREATE TABLE: renders the full CREATE TABLE statement for a schema,
## including inheritance, ownership, and the optional translations table.
-- create the table
CREATE TABLE IF NOT EXISTS "${table}" (
    -- define the columns
    ## iterate the columns directly; the previous enumerate index was unused
    % for column in columns['base'] + columns['primary']:
    ${ADD_COLUMN(column).replace('ADD COLUMN ', '')},
    % endfor
    % if columns['primary']:
    -- define the primary key constraint
    CONSTRAINT "${table}_pkey" PRIMARY KEY (${u','.join([QUOTE(col.fieldName()) for col in columns['primary']])})
    % endif
)
% if inherits:
INHERITS ("${inherits}")
% endif
WITH (OIDS=FALSE);
ALTER TABLE "${table}" OWNER TO "${owner}";
% if columns['i18n']:
-- create the translations table
CREATE TABLE "${table}_i18n" (
    -- define the columns
    "locale" CHARACTER VARYING(5),
    "${table}_id" BIGINT REFERENCES "${table}" (${u','.join([QUOTE(col.fieldName()) for col in columns['primary']])}) ON DELETE CASCADE,
    % for column in columns['translations']:
    ${ADD_COLUMN(column).replace('ADD COLUMN ', '')},
    % endfor
    -- define the constraints
    CONSTRAINT "${table}_i18n_pkey" PRIMARY KEY ("${table}_id", "locale")
)
WITH (OIDS=FALSE);
ALTER TABLE "${table}_i18n" OWNER TO "${owner}";
% endif
## CREATE VIEW template: builds a (MATERIALIZED) view whose columns are all
## dotted shortcuts off of the view schema's primary reference column.  The
## embedded python walks each shortcut, accumulating the LEFT JOINs,
## GROUP BY fields and optional preloaded "first"/"last" subqueries the
## rendered SQL below consumes.
<%
import projex.text
from collections import OrderedDict
from orb import errors
primary = schema.primaryColumns()[0]
reference_model = primary.referenceModel()
reference_table = reference_model.schema().dbname()
preload = {}
columns = []
group_by = []
joins = OrderedDict()
# recursively resolves one dotted shortcut path, registering any joins and
# group-by fields it needs; returns the SQL expression for the final field
def populate(schema, source, parts, alias):
next_part = parts.pop(0)
# look for an aggregate
column = schema.column(next_part)
# join in a column
if column:
if not column.isReference():
return '{0}.{1}'.format(alias, QUOTE(column.fieldName()))
else:
ref_model = column.referenceModel()
alias = alias or ref_model.schema().dbname()
join_alias = alias + '_' + projex.text.underscore(column.name())
target = '{0}.{1}'.format(join_alias, QUOTE(ref_model.schema().primaryColumns()[0].fieldName()))
source = '{0}.{1}'.format(alias, QUOTE(column.fieldName()))
join = {'table': QUOTE(ref_model.schema().dbname()),
'alias': join_alias,
'on': '{0} = {1}'.format(target, source)}
joins.setdefault(join_alias, join)
group_by.append(target)
if not parts:
return target
else:
return populate(ref_model.schema(), target, parts, join_alias)
# join in a lookup or pipe
else:
pipe = schema.pipe(next_part)
if not pipe:
rev_lookup = schema.reverseLookup(next_part)
if not rev_lookup:
raise errors.ColumnNotFound(schema.name(), next_part)
try:
record_part = parts.pop(0)
invert_dir = record_part == 'last'
except IndexError:
raise errors.QueryInvalid('Cannot join in a record set for a View.')
# define x here or mako will not render this template (preprocessor fails to find the
# value within list compression)
x = 0
if pipe:
join_schema = pipe.targetReferenceModel().schema()
join_primary = join_schema.primaryColumns()[0]
pipe_schema = pipe.pipeReferenceModel().schema()
source_col = pipe_schema.column(pipe.sourceColumn())
target_col = pipe_schema.column(pipe.targetColumn())
join_field = QUOTE(source_col.fieldName())
column_name = projex.text.underscore(pipe.name())
order = join_schema.defaultOrder() or [(join_primary.name(), 'asc')]
if invert_dir:
order = [(x[0], 'asc' if x[1] == 'desc' else 'desc') for x in order]
order = [QUOTE(join_schema.dbname(), join_schema.column(x[0]).fieldName()) + ' ' + x[1].upper()
for x in order]
opts = (
QUOTE(source_col.fieldName()),
QUOTE(join_schema.dbname()),
QUOTE(pipe_schema.dbname()),
QUOTE(target_col.fieldName()),
QUOTE(join_schema.dbname(), join_primary.fieldName()),
', '.join(order)
)
if record_part in ('first', 'last'):
join_table = QUOTE('_'.join((join_schema.dbname(), column_name, record_part)))
preload_as = '(SELECT DISTINCT ON (j.{0}) {1}.*, j.{0} FROM {1} ' \
'LEFT JOIN {2} j ON j.{3} = {4} ORDER BY j.{0}, {5})'
preload.setdefault(join_table, preload_as.format(*opts))
else:
join_table = QUOTE(join_schema.dbname())
else:
join_schema = rev_lookup.schema()
join_primary = join_schema.primaryColumns()[0]
join_field = QUOTE(rev_lookup.fieldName())
column_name = projex.text.underscore(rev_lookup.reversedName())
order = join_schema.defaultOrder() or [(join_primary.name(), 'asc')]
if invert_dir:
order = [(x[0], 'asc' if x[1] == 'desc' else 'desc') for x in order]
order = [QUOTE(join_schema.dbname(), join_schema.column(x[0]).fieldName()) + ' ' + x[1].upper()
for x in order]
opts = (
QUOTE(rev_lookup.fieldName()),
QUOTE(join_schema.dbname()),
QUOTE(join_primary.fieldName()),
', '.join(order)
)
if record_part in ('first', 'last'):
join_table = QUOTE('_'.join((join_schema.dbname(), column_name, record_part)))
preload_as = '(SELECT DISTINCT ON ({0}) {1}.* FROM {1} ORDER BY {0}, {3})'
preload.setdefault(join_table, preload_as.format(*opts))
else:
join_table = QUOTE(join_schema.dbname())
join_alias = '_'.join((join_schema.dbname(), column_name, record_part))
target = '{0}.{1}'.format(join_alias, QUOTE(join_primary.fieldName()))
join = {'table': join_table,
'alias': join_alias,
'on': '{0}.{1} = {2}'.format(join_alias, join_field, source)}
joins.setdefault(join_alias, join)
if record_part == 'count':
return 'count({0}.*)'.format(join_alias)
elif record_part == 'ids':
return 'array_agg({0})'.format(target)
elif not parts:
group_by.append(target)
return target
else:
return populate(join_schema, target, parts, join_alias)
curr_schema = reference_model.schema()
curr_field = QUOTE(reference_table, reference_model.schema().primaryColumns()[0].fieldName())
columns.append('{0} AS {1}'.format(curr_field, QUOTE(primary.fieldName())))
group_by.append(curr_field)
# every non-primary view column must be a shortcut rooted at the primary
# reference column; anything else is rejected up front
for column in schema.columns():
if column.primary():
continue
parts = column.shortcut().split('.')
if not (len(parts) > 1 and parts[0] == primary.name()):
msg = 'Invalid column ({0}) on View ({1}). All columns must be shortcuts.'
raise errors.OrbError(msg.format(column.name(), curr_schema.name()))
field_name = populate(curr_schema, curr_field, parts[1:], curr_schema.dbname())
columns.append('{0} AS {1}'.format(field_name, column.fieldName()))
%>
DROP ${'MATERIALIZED' if schema.isStatic() else ''} VIEW IF EXISTS "${schema.dbname()}";
CREATE ${'MATERIALIZED' if schema.isStatic() else ''} VIEW "${schema.dbname()}" AS (
% if preload:
WITH
% for i, (preload_table, preload_as) in enumerate(preload.items()):
${preload_table if not i else ',' + preload_table} AS ${preload_as}
% endfor
% endif
SELECT ${',\n '.join(columns)}
FROM "${reference_table}"
% for join in joins.values():
LEFT JOIN ${join['table']} AS ${join['alias']} ON ${join['on']}
% endfor
GROUP BY ${',\n '.join(group_by)}
);
## DELETE template: removes rows (optionally filtered by a rendered WHERE
## clause) and returns the deleted rows via RETURNING *.
DELETE FROM "${table}"
% if where:
WHERE ${where}
% endif
RETURNING *;
## ENABLE_INTERNALS template: toggles integrity bookkeeping globally (no
## table given) or per-table around bulk operations.
## NOTE(review): SET unique_checks / foreign_key_checks and
## ENABLE/DISABLE KEYS are MySQL syntax — confirm this fragment belongs to
## the backend it ships with.
% if not table:
SET unique_checks=${int(enabled)};
SET foreign_key_checks=${int(enabled)};
% else:
% if enabled:
ALTER TABLE "${table}" ENABLE KEYS;
% else:
ALTER TABLE "${table}" DISABLE KEYS;
% endif
% endif
## INSERT template: bulk-inserts the base rows, then the i18n rows keyed
## off the ids generated by the base insert.
% if insertions['base']:
INSERT INTO "${table}" (
${','.join(['"{0}"'.format(column.fieldName()) for column in columns['base']])}
)
VALUES
% for row in insertions['base'][:-1]:
(${','.join(row)}),
% endfor
(${','.join(insertions['base'][-1])})
RETURNING id;
% endif
% if insertions['i18n']:
<% count = len(insertions['i18n']) %>
INSERT INTO "${table}_i18n" (
"${table}_id", "locale", ${','.join(['"{0}"'.format(column.fieldName()) for column in columns['i18n']])}
)
VALUES
## LASTVAL() is the id of the last base row; earlier rows are addressed by
## offset — assumes the id sequence advanced by exactly 1 per inserted row
## (TODO confirm for sequences with a non-default increment)
% for i, row in enumerate(insertions['i18n'][:-1]):
(LASTVAL() - ${count - (i+1)}, %(locale)s, ${','.join(row)}),
%endfor
(LASTVAL(), %(locale)s, ${','.join(insertions['i18n'][-1])})
RETURNING "${table}_id" AS "id";
% endif
## renders the entries of `text` double-quoted and joined with `joiner`
${joiner.join([u'"{0}"'.format(t) for t in text])}
## Introspection query: one row per table in the namespace, with the
## aggregated column names and index names of the table and its "_i18n"
## companion; the _i18n tables themselves are filtered out of the results.
SELECT t.table_name AS name,
(
SELECT array_agg(c.column_name::varchar)
FROM information_schema.columns AS c
WHERE c.table_schema = '${namespace}'
AND c.table_name IN (t.table_name, t.table_name || '_i18n')
) AS "columns",
(
SELECT array_agg(i.indexname)
FROM pg_indexes AS i
WHERE i.schemaname = '${namespace}'
AND i.tablename IN (t.table_name, t.table_name || '_i18n')
) AS "indexes"
FROM information_schema.tables AS t
WHERE t.table_schema = '${namespace}'
AND t.table_name NOT ILIKE '%%_i18n';
## SELECT_AGGREGATE template: renders a LEFT JOIN against a grouped
## subquery of the referenced table (COUNT/SUM/MAX/MIN per aggregate
## type).  The generated join table/column names are passed back to the
## caller through the GLOBALS dict.
<%
WHERE = SQL.byName('WHERE')
aggr = column.aggregator()
src_table = column.schema().dbname()
ref_col = aggr.referenceColumn()
ref_table = ref_col.schema().dbname()
targ_col = aggr.targetColumn()
query = aggr.where(column)
if query is not None:
where = WHERE(ref_col.schema(), query, GLOBALS=GLOBALS, IO=IO)
else:
where = None
pcols = []
for pcol in column.schema().primaryColumns():
pcols.append('"{0}"."{1}"'.format(src_table, pcol.fieldName()))
# create the join table name
join_count = GLOBALS.get('join_count', 0)
join_count += 1
join_table = 'join_{0}'.format(join_count)
join_col = 'SUM(COALESCE("{0}"."{1}",0)) AS "{1}"'.format(join_table,
column.name())
GLOBALS['join_count'] = join_count
GLOBALS['join_table'] = join_table
GLOBALS['join_column'] = join_col
GLOBALS.setdefault('field_mapper', {})
agg_type = aggr.aggregateType()
if agg_type == orb.QueryAggregate.Type.Count:
command = 'COUNT(*)'
elif agg_type == orb.QueryAggregate.Type.Sum:
command = 'SUM("{0}")'.format(targ_col.fieldName())
elif agg_type == orb.QueryAggregate.Type.Maximum:
command = 'MAX("{0}")'.format(targ_col.fieldName())
elif agg_type == orb.QueryAggregate.Type.Minimum:
command = 'MIN("{0}")'.format(targ_col.fieldName())
else:
raise orb.QueryInvalid('Invalid aggregate type.')
GLOBALS['field_mapper'][column] = '"{0}"."{1}"'.format(join_table, column.name())
%>
LEFT JOIN (
SELECT "${ref_col.fieldName()}", ${command} AS "${column.name()}"
FROM "${ref_table}"
% if where:
WHERE ${where}
% endif
GROUP BY "${ref_col.fieldName()}"
) AS "${join_table}"
ON "${join_table}"."${ref_col.fieldName()}" = (${','.join(pcols)})
## SELECT_COUNT template: wraps the rendered SELECT in a COUNT(*).  The
## lookup is narrowed to the primary-key columns first so the inner select
## stays cheap.
<%
SELECT = SQL.byName('SELECT')
# only select the primary keys if we're only interested in the row count
if not lookup.columns:
lookup.columns = [col.name() for col in table.schema().primaryColumns()]
select_command = SELECT(table, lookup=lookup, options=options, GLOBALS=GLOBALS, IO=IO).strip(';')
%>
% if select_command:
SELECT COUNT(*) AS count
FROM (
${select_command}
) AS records;
% endif
## SELECT_EXPAND template: renders a correlated row_to_json sub-select for
## an expanded pipe, reverse lookup, or reference column, recursing into
## nested expansions via collect_sub_expand.
<%
import projex.text
from orb import errors
WHERE = SQL.byName('WHERE')
# maps each expandable name in the tree to its ('column'|'pipe'|'reverseLookup', object) pair
def collect_sub_expand(schema, tree):
output = {}
for expand_name, sub_tree in tree.items():
# cannot expand these keywords
if expand_name in ('ids', 'count'):
continue
# these keywords are expanded via their sub-tree
elif expand_name in ('first', 'last'):
output.update(collect_sub_expand(schema, sub_tree))
# lookup a column, pipe, or reverseLookup
else:
column = schema.column(expand_name)
if column:
output[('column', column)] = sub_tree
continue
pipe = schema.pipe(expand_name)
if pipe:
output[('pipe', pipe)] = sub_tree
continue
rev_lookup = schema.reverseLookup(expand_name)
# fixed: previously tested the unrelated 'lookup' template variable,
# which let a failed reverseLookup resolution through as None and made
# the ColumnNotFound error below unreachable
if rev_lookup:
output[('reverseLookup', rev_lookup)] = sub_tree
continue
raise errors.ColumnNotFound(schema.name(), expand_name)
return output
%>
% if pipe:
<%
source_table = pipe.sourceReferenceModel()
join_table = pipe.pipeReferenceModel()
source_column = join_table.schema().column(pipe.sourceColumn())
target_column = join_table.schema().column(pipe.targetColumn())
target_table = pipe.targetReferenceModel()
table_name = target_table.schema().dbname()
has_translations = target_table.schema().hasTranslations()
columns = []
colname = pipe.name()
col_name = projex.text.underscore(colname)
alias = col_name + '_table'
source_primary = source_table.schema().primaryColumn().fieldName()
target_primary = target_column.schema().primaryColumn().fieldName()
records_alias = col_name + '_records'
record_primary = target_table.schema().primaryColumn().fieldName()
# include the base table's filter, if one exists
target_filter = target_table.baseQuery(lookup=lookup, options=options)
if target_filter is not None:
base_where = '(' + WHERE(target_table.schema(), target_filter, schema_alias=alias, GLOBALS=GLOBALS, IO=IO) + ') AND '
else:
base_where = ''
source_table_name = source_alias or source_table.schema().dbname()
if 'ids' in tree:
columns.append('array_agg({0}.{1}) AS ids'.format(records_alias, target_primary))
if 'count' in tree:
columns.append('count({0}.*) AS count'.format(records_alias))
if 'first' in tree:
columns.append('(array_agg(row_to_json({0}.*)))[1] AS first'.format(records_alias)),
if 'last' in tree:
columns.append('(array_agg(row_to_json({0}.*)))[count({0}.*)] AS last'.format(records_alias)),
if 'records' in tree or not columns:
columns.append('array_agg(row_to_json({0}.*)) AS records'.format(records_alias))
%>
(
SELECT row_to_json(${col_name}_row) FROM (
SELECT ${', '.join(columns)}
FROM (
% if not has_translations:
SELECT ${alias}.*
% else:
SELECT ${alias}.*, ${alias}_i18n.*
% endif
% for (type, object), sub_tree in collect_sub_expand(target_table.schema(), tree).items():
,${SELECT_EXPAND(**{type: object, 'tree': sub_tree, 'source_alias': alias, 'GLOBALS': GLOBALS, 'IO': IO, 'lookup': lookup, 'options': options})}
% endfor
FROM "${table_name}" AS "${alias}"
% if has_translations:
LEFT JOIN "${table_name}_i18n" AS "${alias}_i18n" ON ${alias}.${target_primary} = ${alias}_i18n.${table_name}_id AND ${alias}_i18n.locale = '${options.locale}'
% endif
WHERE ${base_where} "${alias}"."${target_primary}" IN (
SELECT DISTINCT ON (j."${target_column.fieldName()}") j."${target_column.fieldName()}"
FROM "${join_table.schema().dbname()}" AS j
WHERE j."${source_column.fieldName()}" = "${source_table_name}"."${source_primary}"
${'LIMIT 1' if pipe.unique() else ''}
)
) ${records_alias}
) ${col_name}_row
) AS "${colname}"
% elif reverseLookup:
<%
source_schema = reverseLookup.schema()
source_column = reverseLookup
table_name = source_schema.dbname()
ref_schema = reverseLookup.referenceModel().schema()
ref_table_name = source_alias or ref_schema.dbname()
has_translations = source_schema.hasTranslations()
colname = reverseLookup.reversedName()
col_name = projex.text.underscore(colname)
alias = col_name + '_table'
records_alias = col_name + '_records'
target_primary = ref_schema.primaryColumn().fieldName()
source_primary = source_schema.primaryColumn().fieldName()
# include the base table's filter, if one exists
target_filter = source_schema.model().baseQuery(lookup=lookup, options=options)
if target_filter is not None:
base_where = '(' + WHERE(ref_schema, target_filter, schema_alias=alias, GLOBALS=GLOBALS, IO=IO) + ') AND '
else:
base_where = ''
columns = []
if 'ids' in tree:
columns.append('array_agg({0}.{1}) AS ids'.format(records_alias, target_primary))
if 'count' in tree:
columns.append('count({0}.*) AS count'.format(records_alias))
if 'first' in tree:
columns.append('(array_agg(row_to_json({0}.*)))[1] AS first'.format(records_alias)),
if 'last' in tree:
columns.append('(array_agg(row_to_json({0}.*)))[count({0}.*)] AS last'.format(records_alias)),
if 'records' in tree or not columns:
columns.append('array_agg(row_to_json({0}.*)) AS records'.format(records_alias))
%>
(
SELECT row_to_json(${col_name}_row) FROM (
SELECT ${', '.join(columns)}
FROM (
% if not has_translations:
SELECT ${alias}.*
% else:
SELECT ${alias}.*, ${alias}_i18n.*
% endif
% for (type, object), sub_tree in collect_sub_expand(source_schema, tree).items():
,${SELECT_EXPAND(**{type: object, 'tree': sub_tree, 'source_alias': alias, 'GLOBALS': GLOBALS, 'IO': IO, 'lookup': lookup, 'options': options})}
% endfor
FROM "${table_name}" AS "${alias}"
% if has_translations:
LEFT JOIN "${table_name}_i18n" AS "${alias}_i18n" ON ${alias}.${target_primary} = ${alias}_i18n.${table_name}_id AND ${alias}_i18n.locale = '${options.locale}'
% endif
WHERE ${base_where} "${alias}"."${source_column.fieldName()}" = "${ref_table_name}".${target_primary}
${'LIMIT 1' if source_column.unique() else ''}
) ${records_alias}
) ${col_name}_row
) AS "${colname}"
% elif column:
<%
reference = column.referenceModel()
ref_table_name = source_alias or column.schema().dbname()
colname = column.name()
table_name = reference.schema().dbname()
col_name = projex.text.underscore(colname)
alias = projex.text.underscore(column.name()) + '_table'
primary = reference.schema().primaryColumn().fieldName()
has_translations = reference.schema().hasTranslations()
# include the base table's filter, if one exists
target_filter = reference.baseQuery(lookup=lookup, options=options)
if target_filter is not None:
base_where = '(' + WHERE(reference.schema(), target_filter, schema_alias=alias, GLOBALS=GLOBALS, IO=IO) + ') AND '
else:
base_where = ''
%>
(
SELECT row_to_json(${col_name}_row) FROM (
% if not has_translations:
SELECT "${alias}".*
% else:
SELECT "${alias}".*, "${alias}_i18n".*
% endif
% for (type, object), sub_tree in collect_sub_expand(reference.schema(), tree).items():
,${SELECT_EXPAND(**{type: object, 'tree': sub_tree, 'source_alias': alias, 'GLOBALS': GLOBALS, 'IO': IO, 'lookup': lookup, 'options': options})}
% endfor
FROM "${table_name}" AS "${alias}"
% if has_translations:
LEFT JOIN "${table_name}_i18n" AS "${alias}_i18n" ON "${alias}"."${primary}" = "${alias}_i18n".${table_name}_id AND ${alias}_i18n.locale = '${options.locale}'
% endif
WHERE ${base_where} "${alias}"."${primary}" = "${ref_table_name}"."${column.fieldName()}"
) ${col_name}_row
) AS "${colname}"
% endif
## SELECT_JOINER template: LEFT JOINs a DISTINCT ON subquery of the
## referenced table, exposing one joined value per source row; the join
## table/column names are handed back to the caller through GLOBALS.
<%
WHERE = SQL.byName('WHERE')
joiner = column.joiner()
src_table = column.schema().dbname()
ref_col = joiner.referenceColumn()
ref_table = ref_col.schema().dbname()
targ_col = joiner.targetColumn()
query = joiner.where(column)
if query is not None:
where = WHERE(ref_col.schema(), query, GLOBALS=GLOBALS, IO=IO)
else:
where = None
pcols = []
for pcol in column.schema().primaryColumns():
pcols.append('"{0}"."{1}"'.format(src_table, pcol.fieldName()))
# create the join table name
join_count = GLOBALS.get('join_count', 0)
join_count += 1
join_table = 'join_{0}'.format(join_count)
join_col = '(ARRAY_AGG("{0}"."{1}"))[1] AS "{2}"'.format(join_table,
column.name(),
column.name())
GLOBALS.setdefault('field_mapper', {})
GLOBALS['join_count'] = join_count
GLOBALS['join_table'] = join_table
GLOBALS['join_column'] = join_col
GLOBALS['field_mapper'][column] = '"{0}"."{1}"'.format(join_table,
column.name())
%>
LEFT JOIN (
SELECT DISTINCT ON ("${ref_col.fieldName()}")
"${ref_col.fieldName()}", "${targ_col.fieldName()}" AS "${column.name()}"
FROM "${ref_table}"
% if where:
WHERE ${where}
% endif
) AS "${join_table}"
ON "${join_table}"."${ref_col.fieldName()}" = (${','.join(pcols)})
## SELECT template: assembles the full SELECT statement — plain columns,
## i18n hstore/array columns, aggregate/joiner subjoins, expansions,
## WHERE, GROUP BY, ORDER BY and paging.
<%
SELECT_AGGREGATE = SQL.byName('SELECT_AGGREGATE')
SELECT_JOINER = SQL.byName('SELECT_JOINER')
SELECT_EXPAND = SQL.byName('SELECT_EXPAND')
SELECT_SHORTCUT = SQL.byName('SELECT_SHORTCUT')
WHERE = SQL.byName('WHERE')
ID = orb.system.settings().primaryField()
GLOBALS['field_mapper'] = {}
schema = table.schema()
table_name = schema.dbname()
primary = schema.primaryColumn().fieldName()
base_query = table.baseQuery(lookup=lookup, options=options)
if base_query:
lookup.where = base_query & lookup.where
# orders plain fields first, then joined, then aggregate columns
# (py2 cmp-style comparator)
def cmpcol(a, b):
result = cmp(a.isAggregate(), b.isAggregate())
if not result:
result = cmp(a.isJoined(), b.isJoined())
if not result:
return cmp(a.fieldName(), b.fieldName())
return result
pcols = [QUOTE(table_name, pcol.fieldName()) for pcol in schema.primaryColumns()]
expand_tree = lookup.expandtree()
expanded = bool(expand_tree)
joined = []
columns = []
i18n_columns = []
group_by = set()
if lookup.where:
query_columns = lookup.where.columns(schema)
else:
query_columns = []
for column in sorted(schema.columns(), cmpcol):
if lookup.columns and \
not (column.name() in lookup.columns or
column.fieldName() in lookup.columns or
column in lookup.columns):
use_column = False
else:
use_column = True
if column.isAggregate():
if use_column or column in query_columns:
aggr_sql = SELECT_AGGREGATE(column, GLOBALS=GLOBALS, IO=IO)
group_by.update(pcols)
joined.append(aggr_sql)
if use_column:
columns.append(GLOBALS['join_column'])
elif column.isJoined():
if use_column or column in query_columns:
aggr_sql = SELECT_JOINER(column, GLOBALS=GLOBALS, IO=IO)
group_by.update(pcols)
joined.append(aggr_sql)
if use_column:
columns.append(GLOBALS['join_column'])
elif use_column and column.shortcut() and not isinstance(column.schema(), orb.ViewSchema):
raise NotImplementedError('Shortcuts are not supported in PostgreSQL yet.')
elif use_column and column.isTranslatable():
if options.inflated or options.locale == 'all':
# process translation logic
col_sql = 'hstore_agg(hstore("i18n"."locale", "i18n"."{0}")) AS "{1}"'
i18n_columns.append(col_sql.format(column.fieldName(), column.fieldName()))
group_by.add('"{0}"."{1}"'.format(table_name, ID))
GLOBALS['field_mapper'][column] = '"i18n"."{0}"'.format(column.fieldName())
else:
col_sql = '(array_agg("i18n"."{0}"))[1] AS "{1}"'
i18n_columns.append(col_sql.format(column.fieldName(), column.fieldName()))
group_by.add('"{0}"."{1}"'.format(table_name, ID))
IO['locale'] = options.locale
GLOBALS['field_mapper'][column] = '"i18n"."{0}"'.format(column.fieldName())
elif not column.isProxy() and use_column:
query_columns.append(column)
# expand a reference column
if column.isReference() and column.name() in expand_tree:
tree = expand_tree.pop(column.name())
col_sql = SELECT_EXPAND(column=column, lookup=lookup, options=options, tree=tree, GLOBALS=GLOBALS, IO=IO)
if col_sql:
columns.append(col_sql)
# or, just return the base record
columns.append('"{0}"."{1}" AS "{2}"'.format(table_name,
column.fieldName(),
column.fieldName()))
# include any additional expansions from pipes or reverse lookups
if expand_tree:
# include pipes
for pipe in schema.pipes():
name = pipe.name()
tree = expand_tree.pop(name, None)
if tree is not None:
col_sql = SELECT_EXPAND(pipe=pipe, lookup=lookup, options=options, tree=tree, GLOBALS=GLOBALS, IO=IO)
if col_sql:
columns.append(col_sql)
if not expand_tree:
break
# include reverse lookups
if expand_tree:
for reverseLookup in schema.reverseLookups():
name = reverseLookup.reversedName()
tree = expand_tree.pop(name, None)
if tree is not None:
col_sql = SELECT_EXPAND(reverseLookup=reverseLookup, lookup=lookup, options=options, tree=tree, GLOBALS=GLOBALS, IO=IO)
if col_sql:
columns.append(col_sql)
if not expand_tree:
break
# a QueryIsNull from WHERE means the query can never match; the sentinel
# suppresses statement rendering below
if lookup.where:
try:
where = WHERE(schema, lookup.where, GLOBALS=GLOBALS, IO=IO)
except orb.errors.QueryIsNull:
where = orb.errors.QueryIsNull
else:
where = ''
if lookup.order:
used = set()
order_by = []
for col, direction in lookup.order:
col_obj = schema.column(col)
if not col_obj:
continue
default = '"{0}"."{1}"'.format(table_name, col_obj.fieldName())
field = GLOBALS['field_mapper'].get(col_obj, default)
if field != default:
group_by.add(field)
order_by.append('{0} {1}'.format(field, direction.upper()))
else:
order_by = []
%>
% if (columns or i18n_columns) and where != orb.errors.QueryIsNull:
SELECT ${'DISTINCT' if lookup.distinct else ''}
${',\n '.join(columns+i18n_columns)}
FROM "${table_name}"
${'\n'.join(joined) if joined else ''}
% if i18n_columns:
% if options.locale == 'all':
LEFT JOIN "${table_name}_i18n" AS "i18n" ON (
"i18n"."${table_name}_id" = "${ID}"
)
% else:
LEFT JOIN "${table_name}_i18n" AS "i18n" ON (
"i18n"."${table_name}_id" = "${table_name}"."${ID}" AND "i18n"."locale" = '${options.locale}'
)
% endif
% endif
% if expanded:
% if where or order_by or lookup.start or lookup.limit:
WHERE "${table_name}"."${primary}" IN (
SELECT DISTINCT ${'ON ({0}) '.format(', '.join([col.split(' ')[0] for col in order_by])) if order_by else ''}"${table_name}"."${primary}"
FROM "${table_name}"
% if i18n_columns:
% if options.locale == 'all':
LEFT JOIN "${table_name}_i18n" AS "i18n" ON (
"i18n"."${table_name}_id" = "${ID}"
)
% else:
LEFT JOIN "${table_name}_i18n" AS "i18n" ON (
"i18n"."${table_name}_id" = "${table_name}"."${ID}" AND "i18n"."locale" = '${options.locale}'
)
% endif
% endif
% if where:
WHERE ${where}
% endif
% if group_by:
GROUP BY ${', '.join(list(group_by) + [col.split(' ')[0] for col in order_by])}
% endif
% if order_by:
ORDER BY ${', '.join(order_by)}
% endif
% if lookup.start:
OFFSET ${lookup.start}
% endif
% if lookup.limit > 0:
LIMIT ${lookup.limit}
% endif
)
% endif
% if group_by:
GROUP BY ${', '.join(group_by)}
% endif
% else:
% if where:
WHERE ${where}
% endif
% if group_by:
GROUP BY ${', '.join(group_by)}
% endif
% if order_by:
ORDER BY ${', '.join(order_by)}
% endif
% if lookup.start:
OFFSET ${lookup.start}
% endif
% if lookup.limit > 0:
LIMIT ${lookup.limit}
% endif
% endif
;
% endif
## One-time database setup: installs the hstore extension, an hstore_agg
## aggregate (used by the i18n SELECT columns) and an array_sort helper.
-- ensure hstore is installed
CREATE EXTENSION IF NOT EXISTS hstore;
-- define the hstore_agg aggregate
DROP AGGREGATE IF EXISTS hstore_agg(hstore);
CREATE AGGREGATE hstore_agg(hstore) (
sfunc=hs_concat,
stype=hstore
);
-- define the array_sort method
CREATE OR REPLACE FUNCTION array_sort (ANYARRAY)
RETURNS ANYARRAY LANGUAGE SQL
AS $$
SELECT ARRAY(
SELECT $1[s.i] AS "foo"
FROM
generate_series(array_lower($1,1), array_upper($1,1)) AS s(i)
ORDER BY foo
);
$$;
## UPDATE template: for each changed record, emits an UPDATE of the base
## table plus, per locale, an insert-or-update DO block against the
## "_i18n" table.  Values are bound through the IO dict rather than
## inlined into the SQL.
<%
table_name = schema.dbname()
pcols = ['"{0}"'.format(pcol.fieldName()) for pcol in schema.primaryColumns()]
%>
% for record, columns in changes:
<%
updates = []
translation_updates = {}
translation_columns = {}
translation_values = {}
# IO keys are just running indices, so each bound value gets a unique name
pkey = str(len(IO))
IO[pkey] = record.primaryKey()
for column in columns:
if column.isTranslatable():
for locale, value in record.recordValue(column.name(), locale='all').items():
key = str(len(IO))
IO[key] = SQL.datastore().store(column, value)
translation_updates.setdefault(locale, [])
translation_columns.setdefault(locale, [])
translation_values.setdefault(locale, [])
translation_updates[locale].append('"{0}" = %({1})s'.format(column.fieldName(), key))
translation_columns[locale].append('"{0}"'.format(column.fieldName()))
translation_values[locale].append('%({0})s'.format(key))
else:
key = str(len(IO))
IO[key] = SQL.datastore().store(column,
record.recordValue(column.name()))
updates.append('"{0}" = %({1})s'.format(column.fieldName(), key))
%>
% if updates:
UPDATE "${table_name}"
SET ${',\n '.join(updates)}
WHERE (${','.join(pcols)}) = %(${pkey})s;
% endif
% if translation_updates:
% for locale in translation_updates:
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1
FROM "${table_name}_i18n"
WHERE "${table_name}_id" = %(${pkey})s AND "locale" = '${locale}'
)
THEN
INSERT INTO "${table_name}_i18n" ("${table_name}_id", "locale", ${', '.join(translation_columns[locale])})
VALUES (%(${pkey})s, '${locale}', ${', '.join(translation_values[locale])});
ELSE
UPDATE "${table_name}_i18n"
SET ${',\n '.join(translation_updates[locale])}
WHERE "${table_name}_id" = %(${pkey})s AND "locale" = '${locale}';
END IF;
END$$;
% endfor
% endif
% endfor
## WHERE-value template: renders one comparison depending on the value's
## type — another query column, NULL, a RecordSet sub-select, or a bound
## literal (with LIKE wildcards applied for contains/startswith/endswith).
% if orb.Query.typecheck(value):
<%
val_schema = value.table().schema()
val_col = value.column()
query_field = QUOTE(schema_alias or val_schema.dbname(), val_col.fieldName())
%>
% if query.isInverted():
${query_field} ${op} ${field}
% else:
${field} ${op} ${query_field}
% endif
% elif value is None:
% if operator == orb.Query.Op.Is:
${field} IS NULL
% elif operator == orb.Query.Op.IsNot:
${field} IS NOT NULL
% endif
% elif orb.RecordSet.typecheck(value):
<% SELECT = SQL.byName('SELECT') %>
${field} ${op} (
${SELECT(value, default_columns=['id'], IO=IO).strip(';')}
)
% else:
<%
# an empty IN () / NOT IN () can never match; signal the caller instead
if operator in (orb.Query.Op.IsIn, orb.Query.Op.IsNotIn) and not value:
raise orb.errors.QueryIsNull()
ID = schema.primaryColumn().fieldName()
key = str(len(IO))
key_id = '%({0})s'.format(key)
if query.operatorType() in (query.Op.Contains, query.Op.DoesNotContain):
store_value = '%{0}%'.format(value)
elif query.operatorType() in (query.Op.Startswith, query.Op.DoesNotStartwith):
store_value = '{0}%'.format(value)
elif query.operatorType() in (query.Op.Endswith, query.Op.DoesNotEndwith):
store_value = '%{0}'.format(value)
else:
store_value = value
IO[key] = SQL.datastore().store(column, store_value)
%>
% if column.isTranslatable():
## check to see if this column has already been merged
% if column in GLOBALS['field_mapper']:
${field} ${op} %(${key})s
% else:
<% table_name = schema.dbname() %>
"${schema_alias or table_name}"."${ID}" IN (
SELECT "${table_name}_id"
FROM "${table_name}_i18n"
% if query.isInverted():
WHERE %(${key})s ${op} "${table_name}_i18n"."${column.fieldName()}"
% else:
WHERE "${table_name}_i18n"."${column.fieldName()}" ${op} %(${key})s
% endif
)
% endif
% elif query.isInverted():
${key_id} ${op} ${field}
% else:
${field} ${op} ${key_id}
% endif
% endif
"""
Defines the data store logic for PSQL databases.
"""
from orb import DataStore
# Postgres flavour of the orb data store; all behavior comes from the
# base DataStore implementation.
class PSQLDataStore(DataStore):
    pass

# register under the 'Postgres' key so orb can resolve this store by name
DataStore.registerAddon('Postgres', PSQLDataStore)
import logging
import orb
from collections import defaultdict
from orb import errors
from orb.core.backends.sql.abstractsql import SQL
log = logging.getLogger(__name__)
# A
# ----------------------------------------------------------------------
class ADD_COLUMN(SQL):
    """Renders the ADD COLUMN sql statement for a single <orb.Column>."""

    def collectFlags(self, column):
        """
        Collects the SQL snippets for every flag set on the given column.

        :param      column | <orb.Column>

        :return     [<str>, ..]
        """
        Flags = orb.Column.Flags
        lookup = self.baseSQL().byName
        # keep only the flags that actually have SQL defined for them
        candidates = (lookup('Flag::{0}'.format(Flags(flag))) for flag in column.iterFlags())
        return [flag_sql for flag_sql in candidates if flag_sql]

    def render(self, column, **scope):
        """
        Generates the ADD COLUMN sql for an <orb.Column> in Postgres.

        :param      column | <orb.Column>
                    **scope | <keywords>

        :return     <str>
        """
        # a reference column without a resolvable model cannot be rendered
        if column.reference() and not column.referenceModel():
            raise errors.TableNotFound(column.reference())

        type_name = column.columnTypeText()
        sql_type = self.baseSQL().byName('Type::{0}'.format(type_name))
        if not sql_type:
            raise errors.DatabaseError('Unknown column type: {0}'.format(type_name))

        context = {
            'column': column,
            'field': column.fieldName(),
            'reference': column.referenceModel().schema().dbname() if column.reference() else '',
            'type': sql_type,
            'flags': self.collectFlags(column),
            # fall back on the type's default length when no max length is set
            'max_length': column.maxlength() or self.baseSQL().byName('Length::{0}'.format(type_name))
        }
        # caller-supplied overrides win over the defaults above
        context.update(scope)
        return super(ADD_COLUMN, self).render(**context)
class ALTER_TABLE(SQL):
    @staticmethod
    def collectColumns(schema, selection):
        """
        Groups the given columns into the buckets the ALTER TABLE template
        consumes: 'primary', 'all', 'i18n' (translatable) and 'base'.

        :param      schema    | <orb.TableSchema>
                    selection | [<orb.Column>, ..] || None

        :return     <collections.defaultdict(list)>
        """
        columns = defaultdict(list)
        if selection is None:
            return columns

        # copy the schema's list so the appends below cannot mutate the
        # list object owned by the schema itself
        columns['primary'] = list(schema.primaryColumns())

        # extract the columns that we'll need
        for column in sorted(selection, key=lambda x: x.fieldName()):
            # virtual columns never map to physical fields
            if column.isAggregate() or column.isJoined() or column.isProxy():
                continue
            elif column.primary():
                # inherited tables get their primary key from the parent
                if schema.inherits():
                    continue
                else:
                    columns['primary'].append(column)
                    columns['all'].append(column)
            elif column.isTranslatable():
                columns['all'].append(column)
                columns['i18n'].append(column)
            else:
                columns['all'].append(column)
                columns['base'].append(column)
        return columns

    def render(self, schema, added=None, removed=None, **scope):
        """
        Generates the ALTER TABLE sql for an <orb.Table>.

        :param      schema  | <orb.TableSchema>
                    added   | [<orb.Column>, ..] || None
                    removed | [<orb.Column>, ..] || None
                    **scope | <dict>

        :return     <str>
        """
        # only table schemas can be altered through this statement
        if not isinstance(schema, orb.TableSchema):
            return ''

        db = scope.get('db', orb.system.database())
        new_columns = self.collectColumns(schema, added)
        old_columns = self.collectColumns(schema, removed)

        # define the new scope
        new_scope = {
            'schema': schema,
            'table': schema.dbname(),
            'added': new_columns,
            'removed': old_columns,
            'owner': db.username(),
            'inherits': schema.inheritsModel().schema().dbname() if schema.inherits() else '',

            # define some useful sql queries by default
            'ADD_COLUMN': self.baseSQL().byName('ADD_COLUMN'),
            'ADD_CONSTRAINT': self.baseSQL().byName('ADD_CONSTRAINT'),
            'CREATE_INDEX': self.baseSQL().byName('CREATE_INDEX')
        }

        # update any user overrides
        new_scope.update(scope)
        return super(ALTER_TABLE, self).render(**new_scope)
# C
# ----------------------------------------------------------------------
class CREATE_TABLE(SQL):
    @staticmethod
    def collectColumns(table):
        """
        Groups the table's physical columns into the buckets the CREATE
        TABLE template consumes: 'primary', 'all', 'i18n' and 'base'.

        :param      table | <orb.Table>

        :return     <collections.defaultdict(list)>
        """
        schema = table.schema()
        grouped = defaultdict(list)
        ordered = sorted(schema.columns(recurse=False), key=lambda col: col.fieldName())
        for column in ordered:
            # virtual / reference columns never become physical fields here
            if (column.isAggregate() or column.isJoined() or
                    column.isProxy() or column.isReference()):
                continue
            if column.primary():
                # inherited tables get their primary key from the parent
                if schema.inherits():
                    continue
                bucket = 'primary'
            elif column.isTranslatable():
                bucket = 'i18n'
            else:
                bucket = 'base'
            grouped[bucket].append(column)
            grouped['all'].append(column)
        return grouped

    def render(self, table, db=None, **scope):
        """
        Generates the CREATE TABLE sql for an <orb.Table>.

        :param      table | <orb.Table>
                    **scope | <dict>

        :return     <str>
        """
        # ensure this is in fact a table type
        if not orb.Table.typecheck(table):
            return ''

        schema = table.schema()
        database = db or orb.system.database()
        base = self.baseSQL()

        # define the new scope
        context = {
            'schema': schema,
            'table': schema.dbname(),
            'columns': self.collectColumns(table),
            'owner': database.username(),
            'inherits': schema.inheritsModel().schema().dbname() if schema.inherits() else '',

            # define some useful sql queries by default
            'ADD_COLUMN': base.byName('ADD_COLUMN'),
            'ADD_CONSTRAINT': base.byName('ADD_CONSTRAINT'),
            'CREATE_INDEX': base.byName('CREATE_INDEX'),
        }

        # a table needs either its own primary key or an inherited one
        if not (context['columns']['primary'] or context['inherits']):
            raise errors.DatabaseError('No primary keys defined for {0}.'.format(schema.name()))

        # update any user overrides
        context.update(scope)
        return super(CREATE_TABLE, self).render(**context)
class CREATE_VIEW(SQL):
    def render(self, view, db=None, **scope):
        """
        Generates the CREATE VIEW sql for an <orb.View>; returns '' for
        anything that is not a view type.
        """
        # ensure this is in fact a view type
        if not orb.View.typecheck(view):
            return ''
        scope['schema'] = view.schema()
        return super(CREATE_VIEW, self).render(**scope)
class CREATE_INDEX(SQL):
    def render(self, index_or_column, **scope):
        """
        Generates the CREATE INDEX sql for either an <orb.Index> or a
        single column; exactly one of scope['index'] / scope['column'] is
        populated for the template.
        """
        is_index = isinstance(index_or_column, orb.Index)
        scope['index'] = index_or_column if is_index else None
        scope['column'] = None if is_index else index_or_column
        return super(CREATE_INDEX, self).render(**scope)
# D
#----------------------------------------------------------------------
class DELETE(SQL):
    def render(self, table, query, **scope):
        """
        Generates the DELETE sql for an <orb.Table>.

        :param table | <orb.Table>
               query | <orb.Query>

               **scope | <dict>

        :return <str>
        """
        schema = table.schema()
        io = scope.get('IO', {})

        # render the WHERE clause describing the records to remove
        # noinspection PyShadowingNames
        where_sql = self.baseSQL().byName('WHERE')

        scope['table'] = schema.dbname()
        scope['schema'] = schema
        scope['query'] = query
        scope['where'] = where_sql(schema, query, IO=io)

        return super(DELETE, self).render(**scope)
# E
#----------------------------------------------------------------------
class ENABLE_INTERNALS(SQL):
    def render(self, enabled, schema=None, **scope):
        """
        Generates the sql used to toggle the backend's internal checks.

        :param enabled | <bool>
               schema | <orb.TableSchema> || None

        :return <str>
        """
        # an empty table name means the toggle applies database-wide
        scope.update(enabled=enabled,
                     table=schema.dbname() if schema else '')
        return super(ENABLE_INTERNALS, self).render(**scope)
# I
#----------------------------------------------------------------------
class INSERT(SQL):
    def collectInsertions(self, records, cols, io, locale):
        """
        Collects the column groupings and value placeholder rows needed to
        render the INSERT statement for the given records.

        :param records | [<orb.Table>, ..]
               cols | [<orb.Column>, ..]
               io | <dict> | sql keyword values (mutated in-place)
               locale | <str>

        :return (<defaultdict(list)> columns, <defaultdict(list)> insertions)
        """
        columns = defaultdict(list)
        insertions = defaultdict(list)

        # the datastore does not change per-record, so only look it up once
        # (previously re-fetched on every iteration of the record loop)
        store = self.datastore()

        for i, record in enumerate(records):
            values = record.recordValues(locale=locale, kind=orb.Column.Kind.Field, key='column')

            row_all = []
            row_base = []
            row_i18n = []

            for column in cols:
                # do not insert auto-incrementing columns
                if column.autoIncrement():
                    continue

                # extract the value from the column
                try:
                    value = store.store(column, values[column])
                except KeyError:
                    raise errors.ValueNotFound(record, column.name())

                # store the columns we're using the first pass through
                if not i:
                    columns['all'].append(column)

                    if not column.isTranslatable():
                        columns['base'].append(column)
                    else:
                        columns['i18n'].append(column)

                # store the insertion key/value pairing; keys are the running
                # size of the io dict so each placeholder is unique
                key = len(io)
                key_ref = '%({0})s'.format(key)
                io[str(key)] = value

                row_all.append(key_ref)
                if not column.isTranslatable():
                    row_base.append(key_ref)
                else:
                    row_i18n.append(key_ref)

            if row_all:
                insertions['all'].append(row_all)
            if row_base:
                insertions['base'].append(row_base)
            if row_i18n:
                insertions['i18n'].append(row_i18n)

        return columns, insertions

    def render(self, schema, records, columns=None, **scope):
        """
        Generates the INSERT sql for an <orb.Table>.

        :param schema | <orb.Table> || <orb.TableSchema>
               records | [<orb.Table>, ..]
               columns | [<str>, ..]

               **scope | <dict>

        :return <str>

        :raises <orb.errors.QueryInvalid> when a view is supplied
        """
        if orb.Table.typecheck(schema):
            schema = schema.schema()
        elif orb.View.typecheck(schema):
            raise errors.QueryInvalid('Views are read-only.')

        if columns is None:
            columns = schema.columns(kind=orb.Column.Kind.Field)
        else:
            columns = [schema.column(col) for col in columns]

        locale = scope.get('locale', orb.system.locale())

        io = scope.get('IO', {})
        io['locale'] = locale

        columns, insertions = self.collectInsertions(records, columns, io, locale)

        new_scope = {
            'table': schema.dbname(),
            'schema': schema,
            'records': records,
            'columns': columns,
            'insertions': insertions,
            'IO': io
        }
        new_scope.update(**scope)

        return super(INSERT, self).render(**new_scope)
class INSERTED_KEYS(SQL):
    def render(self, schema, count=1, **scope):
        """
        Generates the INSERTED KEYS sql for an <orb.Table> or <orb.TableSchema>.

        :param schema | <orb.Table> || <orb.TableSchema>
               count | <int>

               **scope | <dict>

        :return <str>
        """
        if orb.Table.typecheck(schema):
            schema = schema.schema()
        elif orb.View.typecheck(schema):
            raise errors.QueryInvalid('Views are read-only.')

        scope.update({
            'schema': schema,
            'field': schema.primaryColumns()[0].fieldName(),
            'table': schema.dbname(),
            'count': count,
        })

        return super(INSERTED_KEYS, self).render(**scope)
# Q
#----------------------------------------------------------------------
class QUOTE(SQL):
    def render(self, *text, **scope):
        """
        Wraps the inputted text in SQL safe quotes for this language.

        :param text | [<str>, ..]

        :return <str>
        """
        # quoted parts are joined with '.' unless the caller overrides it
        if 'joiner' not in scope:
            scope['joiner'] = '.'
        scope['text'] = text
        return super(QUOTE, self).render(**scope)
# S
#----------------------------------------------------------------------
class SCHEMA_INFO(SQL):
    def render(self, **scope):
        """
        Generates the SCHEMA INFO sql for a database namespace.

        :param **scope | <dict>

        :return <str>
        """
        # fall back to the public namespace when the context options are
        # missing, have no namespace attribute, or carry an empty one
        options = scope.get('options')
        scope['namespace'] = getattr(options, 'namespace', None) or 'public'
        return super(SCHEMA_INFO, self).render(**scope)
class SELECT(SQL):
    def render(self, table_or_records, **scope):
        """
        Generates the SELECT sql for an <orb.Table> or <orb.RecordSet>.

        :param table_or_records | <orb.Table> or <orb.RecordSet>
               lookup | <orb.LookupOptions>
               options | <orb.ContextOptions>

               **scope | <dict>

        :return <str>
        """
        is_model = (orb.Table.typecheck(table_or_records) or
                    orb.View.typecheck(table_or_records))

        if is_model:
            # selecting directly from a model builds fresh options
            new_scope = {
                'table': table_or_records,
                'lookup': orb.LookupOptions(**scope),
                'options': orb.ContextOptions(**scope)
            }
        else:
            # a record set carries its own lookup/context state
            new_scope = {
                'table': table_or_records.table(),
                'lookup': table_or_records.lookupOptions(**scope),
                'options': table_or_records.contextOptions(**scope)
            }

        # use default columns
        lookup = new_scope['lookup']
        if not lookup.columns:
            default_columns = scope.pop('default_columns', None)
            if default_columns:
                schema = new_scope['table'].schema()
                lookup.columns = [schema.column(col) for col in default_columns]

        new_scope.update(**scope)
        return super(SELECT, self).render(**new_scope)
class SELECT_AGGREGATE(SQL):
    def render(self, column, **scope):
        """
        Generates the SELECT AGGREGATE sql for an <orb.Table>.

        :param column | <orb.Column>

               **scope | <dict>

        :return <str>
        """
        scope.update(column=column)
        return super(SELECT_AGGREGATE, self).render(**scope)
class SELECT_COUNT(SQL):
    def render(self, table, **scope):
        """
        Generates the SELECT COUNT sql for an <orb.Table>.

        :param table | <orb.Table>
               lookup | <orb.LookupOptions>
               options | <orb.ContextOptions>

               **scope | <dict>

        :return <str>
        """
        scope['table'] = table

        # build default lookup/context options unless the caller supplied them
        if 'lookup' not in scope:
            scope['lookup'] = orb.LookupOptions(**scope)
        if 'options' not in scope:
            scope['options'] = orb.ContextOptions(**scope)

        return super(SELECT_COUNT, self).render(**scope)
class SELECT_EXPAND(SQL):
    def render(self, **scope):
        """
        Generates the SELECT EXPAND sql used when traversing references.

        :param **scope | <dict>

        :return <str>
        """
        # define optional arguments
        defaults = (('source_alias', ''),
                    ('column', None),
                    ('pipe', None),
                    ('reverseLookup', None),
                    ('SELECT_EXPAND', self))
        for key, value in defaults:
            scope.setdefault(key, value)
        return super(SELECT_EXPAND, self).render(**scope)
class SELECT_SHORTCUT(SQL):
    def render(self, column, **scope):
        """
        Generates the SELECT SHORTCUT sql for a column.

        :param column | <orb.Column>

        :return <str>
        """
        scope.update(column=column)
        return super(SELECT_SHORTCUT, self).render(**scope)
class SELECT_JOINER(SQL):
    def render(self, column, **scope):
        """
        Generates the SELECT JOINER sql for an <orb.Table>.

        :param column | <orb.Column>

               **scope | <dict>

        :return <str>
        """
        scope.update(column=column)
        return super(SELECT_JOINER, self).render(**scope)
# U
#----------------------------------------------------------------------
class UPDATE(SQL):
    def render(self, schema, changes, **scope):
        """
        Generates the UPDATE sql for an <orb.Table>.

        :param schema | <orb.Table> || <orb.TableSchema>
               changes | [(<orb.Table>, [<orb.Column>, ..]) ..]

               **scope | <dict>

        :return <str>
        """
        # views cannot be written to; table models resolve to their schema
        if orb.View.typecheck(schema):
            raise errors.QueryInvalid('Views are read-only.')
        elif orb.Table.typecheck(schema):
            schema = schema.schema()

        scope.update(schema=schema, changes=changes)
        return super(UPDATE, self).render(**scope)
# W
#----------------------------------------------------------------------
class WHERE(SQL):
    def queryToSQL(self, schema, query, alias=''):
        """
        Converts the column portion of a query into its quoted SQL field
        reference, applying any query functions (upper, lower, etc.) around it.

        :param schema | <orb.Schema>
               query | <orb.Query>
               alias | <str> | optional table alias used for quoting

        :return <str>

        :raises <orb.errors.QueryInvalid> for an unregistered function type
        """
        # noinspection PyShadowingNames
        QUOTE = self.baseSQL().byName('QUOTE')

        column = query.column(schema)
        output = QUOTE(alias or schema.dbname(), column.fieldName())

        # process any functions on the query
        for func in query.functions():
            sql_func = self.baseSQL().byName('Func::{0}'.format(orb.Query.Function(func)))
            if not sql_func:
                msg = 'Unknown function type {0}'.format(orb.Query.Function(func))
                raise errors.QueryInvalid(msg)
            else:
                # each registered function is a format template wrapping
                # the accumulated field expression
                output = sql_func.format(output)
        return output

    def render(self, schema, query, **scope):
        """
        Generates the WHERE sql for an <orb.Table>.

        :param schema | <orb.Schema>
               where | <orb.Query> || <orb.QueryCompound>

               **scope | <dict> | IO / GLOBALS dicts are created on demand
                                  and mutated in-place so nested calls share
                                  placeholder keys and field mappings

        :return <str>
        """
        if query is None:
            return ''

        scope.setdefault('IO', {})
        scope.setdefault('GLOBALS', {})

        io = scope['IO']
        glbls = scope['GLOBALS']
        db = scope.get('db', orb.system.database())

        query = query.expandShortcuts(schema.model())

        # create a compound query
        if orb.QueryCompound.typecheck(query):
            # SQL statements are callable: self(...) recursively renders each
            # sub-query while sharing the same IO/GLOBALS state
            queries = [self(schema, subq, schema_alias=scope.get('schema_alias'), GLOBALS=glbls, IO=io) for subq in query.queries()]
            joiner = u' AND ' if query.operatorType() == orb.QueryCompound.Op.And else u' OR '
            result = joiner.join([q for q in queries if q])
            return u'({0})'.format(result) if result else ''

        # create a basic query
        else:
            glbls.setdefault('join_count', 0)
            glbls.setdefault('field_mapper', {})

            # grab the column from the query
            column = query.column(schema, db=db)
            if not column:
                raise errors.ColumnNotFound(schema.name(), query.columnName())

            # grab the field information
            # NOTE(review): on a cache miss the computed field is not written
            # back into field_mapper here -- presumably populated elsewhere;
            # confirm before changing
            try:
                field = glbls['field_mapper'][column]
            except KeyError:
                field = self.queryToSQL(schema, query, scope.get('schema_alias', ''))

            # calculate the field math modifications
            for op, target in query.math():
                opts = {
                    'math': orb.Query.Math(op),
                    'type': orb.ColumnType(column.columnType())
                }
                # prefer a type-specific math operator, fall back to generic
                base = self.baseSQL()
                sql = base.byName('Math::{math}::{type}'.format(**opts)) or base.byName('Math::{math}'.format(**opts))
                if not sql:
                    msg = 'Cannot {math} {type} types.'.format(**opts)
                    raise errors.QueryInvalid(msg)
                else:
                    field += sql

                if orb.Query.typecheck(target):
                    field += self.queryToSQL(schema, target, scope.get('schema_alias', ''))
                else:
                    # literal math operand becomes a new IO placeholder,
                    # keyed by the running size of the io dict
                    key = len(io)
                    io[str(key)] = target
                    field += '%({0})s'.format(key)

            # calculate the value
            operator = query.operatorType()
            value = query.value()

            # check for a record value
            if orb.Table.recordcheck(value) or orb.View.recordcheck(value):
                value = value.primaryKey()

            # calculate the sql operation
            op_name = orb.Query.Op(operator)
            op = self.baseSQL().byName('Op::{0}'.format(op_name))
            if op is None:
                raise orb.errors.QueryInvalid('{0} is an unknown operator.'.format(op_name))

            # a case-sensitive variant overrides the base operator when registered
            if query.caseSensitive():
                case = self.baseSQL().byName('Op::{0}::CaseSensitive'.format(op_name))
                op = case or op

            # update the scope
            scope.setdefault('schema_alias', '')
            scope['WHERE'] = self
            scope['schema'] = schema
            scope['query'] = query
            scope['column'] = column
            scope['field'] = field
            scope['value'] = value
            scope['operator'] = operator
            scope['op'] = op

            return super(WHERE, self).render(**scope)
#----------------------------------------------------------------------

# define base column types
# (generic SQL type names keyed by orb column type; backend plugins
#  presumably override these addons where their dialect differs -- note the
#  mix of MySQL-style 'UNSIGNED' and PostgreSQL-style types below)
SQL.registerAddon('Type::BigInt', u'BIGINT')
SQL.registerAddon('Type::Bool', u'BOOL')
SQL.registerAddon('Type::ByteArray', u'VARBINARY')
SQL.registerAddon('Type::Color', u'VARCHAR')
SQL.registerAddon('Type::Date', u'DATE')
SQL.registerAddon('Type::Datetime', u'DATETIME')
SQL.registerAddon('Type::DatetimeWithTimezone', u'TIMESTAMP')
SQL.registerAddon('Type::Decimal', u'DECIMAL UNSIGNED')
SQL.registerAddon('Type::Directory', u'VARCHAR')
SQL.registerAddon('Type::Dict', u'BLOB')
SQL.registerAddon('Type::Double', u'DOUBLE UNSIGNED')
SQL.registerAddon('Type::Email', u'VARCHAR')
SQL.registerAddon('Type::Enum', u'INT UNSIGNED')
SQL.registerAddon('Type::Filepath', u'VARCHAR')
SQL.registerAddon('Type::ForeignKey', u'BIGINT UNSIGNED')
SQL.registerAddon('Type::Html', u'TEXT')
SQL.registerAddon('Type::Image', u'BLOB')
SQL.registerAddon('Type::Integer', u'INT UNSIGNED')
SQL.registerAddon('Type::Password', u'VARCHAR')
SQL.registerAddon('Type::Pickle', u'BLOB')
SQL.registerAddon('Type::Query', u'TEXT')
SQL.registerAddon('Type::String', u'VARCHAR')
SQL.registerAddon('Type::Text', u'TEXT')
SQL.registerAddon('Type::Time', u'TIME')
SQL.registerAddon('Type::Url', u'VARCHAR')
SQL.registerAddon('Type::Xml', u'TEXT')
SQL.registerAddon('Type::Yaml', u'TEXT')
SQL.registerAddon('Type::JSON', u'TEXT')
# define the default lengths
# (character lengths used for VARCHAR column types above)
SQL.registerAddon('Length::Color', 25)
SQL.registerAddon('Length::String', 256)
SQL.registerAddon('Length::Email', 256)
SQL.registerAddon('Length::Password', 256)
SQL.registerAddon('Length::Url', 500)
SQL.registerAddon('Length::Filepath', 500)
SQL.registerAddon('Length::Directory', 500)
# define the base flags
# (column constraint keywords appended in CREATE TABLE / ADD COLUMN statements)
SQL.registerAddon('Flag::Unique', u'UNIQUE')
SQL.registerAddon('Flag::Required', u'NOT NULL')
SQL.registerAddon('Flag::AutoIncrement', u'AUTO_INCREMENT')
# define the base operators
# (query operator -> SQL operator; the '::CaseSensitive' variant is chosen
#  by WHERE.render when the query requests case sensitivity)
SQL.registerAddon('Op::Is', u'=')
SQL.registerAddon('Op::IsNot', u'!=')
SQL.registerAddon('Op::LessThan', u'<')
SQL.registerAddon('Op::Before', u'<')
SQL.registerAddon('Op::LessThanOrEqual', u'<=')
SQL.registerAddon('Op::GreaterThanOrEqual', u'>=')
SQL.registerAddon('Op::GreaterThan', u'>')
SQL.registerAddon('Op::After', u'>')
SQL.registerAddon('Op::Matches', u'~*')
SQL.registerAddon('Op::Matches::CaseSensitive', u'~')
SQL.registerAddon('Op::DoesNotMatch', u'!~*')
# bugfix: the case-sensitive negated regex operator is '!~'; this previously
# registered the case-insensitive '!~*', breaking the symmetry with the
# Matches / Matches::CaseSensitive pair above
SQL.registerAddon('Op::DoesNotMatch::CaseSensitive', u'!~')
SQL.registerAddon('Op::Contains', u'ILIKE')
SQL.registerAddon('Op::Contains::CaseSensitive', u'LIKE')
SQL.registerAddon('Op::Startswith', u'ILIKE')
SQL.registerAddon('Op::Startswith::CaseSensitive', u'LIKE')
SQL.registerAddon('Op::Endswith', u'ILIKE')
SQL.registerAddon('Op::Endswith::CaseSensitive', u'LIKE')
SQL.registerAddon('Op::DoesNotContain', u'NOT ILIKE')
SQL.registerAddon('Op::DoesNotContain::CaseSensitive', u'NOT LIKE')
SQL.registerAddon('Op::DoesNotStartwith', u'NOT ILIKE')
SQL.registerAddon('Op::DoesNotStartwith::CaseSensitive', u'NOT LIKE')
SQL.registerAddon('Op::DoesNotEndwith', u'NOT ILIKE')
SQL.registerAddon('Op::DoesNotEndwith::CaseSensitive', u'NOT LIKE')
SQL.registerAddon('Op::IsIn', u'IN')
SQL.registerAddon('Op::IsNotIn', u'NOT IN')
# define the base functions
# (format templates; '{0}' is replaced with the quoted field expression)
SQL.registerAddon('Func::Lower', u'lower({0})')
SQL.registerAddon('Func::Upper', u'upper({0})')
SQL.registerAddon('Func::Abs', u'abs({0})')
SQL.registerAddon('Func::AsString', u'{0}::varchar')
# define the base math operators
# (type-specific entries like 'Math::Add::String' take precedence over the
#  generic 'Math::Add' in WHERE.render)
SQL.registerAddon('Math::Add', u'+')
SQL.registerAddon('Math::Subtract', u'-')
SQL.registerAddon('Math::Multiply', u'*')
SQL.registerAddon('Math::Divide', u'/')
SQL.registerAddon('Math::And', u'&')
SQL.registerAddon('Math::Or', u'|')
SQL.registerAddon('Math::Add::String', u'||')
SQL.registerAddon('Math::Add::Text', u'||')
""" Defines all the SQL based connection classes. """
# import the backend SQL implementations
from .quickbase import *
""" Defines the backend connection class for Quickbase remote API's. """
from .quickbase import Quickbase
""" Defines the backend connection class for Quickbase databases. """
import datetime
import orb
import logging
import projex.errors
import projex.text
from orb import Query as Q
from orb import errors
from projex.text import nativestring as nstr
from xml.etree import ElementTree
try:
import urllib2
except ImportError:
urllib2 = None
log = logging.getLogger(__name__)
# ----------------------------------------------------------------------
# DEFINE ERROR CLASSES
# ---------------------------------------------------------------------
class ConnectionError(errors.DatabaseError):
    """ Raised when the HTTP request to the Quickbase service fails. """
    pass
class ResponseError(errors.DatabaseError):
    """ Raised when Quickbase returns malformed XML or an error payload. """
    pass
class UnknownError(errors.DatabaseError):
    """ Raised for unexpected failures while talking to Quickbase. """
    pass
# -----------------------------------------------------------------------------
# noinspection PyAbstractClass
class Quickbase(orb.Connection):
    """
    Backend connection class for the Quickbase remote HTTP API.  Requests
    are posted as XML documents via urllib2 (Python 2 only) and the
    responses parsed with ElementTree.
    """

    # map the default orb operator types to a Quickbase query operator
    OpMap = {
        Q.Op.Is: 'EX',
        Q.Op.IsNot: 'XEX',
        Q.Op.LessThan: 'LT',
        Q.Op.LessThanOrEqual: 'LTE',
        Q.Op.GreaterThan: 'GT',
        Q.Op.GreaterThanOrEqual: 'GTE',
        Q.Op.Contains: 'CT',
        Q.Op.DoesNotContain: 'XCT',
        Q.Op.IsIn: 'HAS',
        Q.Op.IsNotIn: 'XHAS',
        Q.Op.Startswith: 'SW',
        Q.Op.Endswith: 'EW',
        Q.Op.Before: 'BF',
        Q.Op.After: 'AF',
        Q.Op.Between: 'IR',
    }

    def __init__(self, database):
        super(Quickbase, self).__init__(database)

        # define custom properties
        self._ticket = None       # session ticket returned by API_Authenticate
        self._qb_user_id = None   # set on open(); previously created only there
        self._timeout = 30        # request timeout in seconds

    def _fieldIds(self, schema):
        """
        Retrieves the field ids from the inputted schema, generating them if
        necessary.  The mapping is cached on the schema under the
        'quickbase_field_ids' property.

        :param schema | <orb.TableSchema>

        :return {<str> fieldName: <str> id, ..}
        """
        # map to the id values for the fields
        field_ids = schema.property('quickbase_field_ids')
        if not field_ids:
            field_ids = {}
            col_map = dict([(c.displayName(), c.fieldName())
                            for c in schema.columns(includeProxies=False)])

            table_id = schema.dbname()
            xresp = self.request(table_id + '?a=td', 'GetSchema')
            xtable = xresp.find('table')
            xfields = xtable.find('fields')
            for xfield in xfields:
                # quickbase field labels map back to orb display names
                fieldName = col_map.get(xfield.find('label').text)
                if not fieldName:
                    continue
                field_ids[fieldName] = xfield.get('id', '0')

            schema.setProperty('quickbase_field_ids', field_ids)
        return field_ids

    @staticmethod
    def _fromQuickbase(column, value):
        """
        Converts a raw Quickbase field string to the python value for the
        given column type.  Values that fail to parse fall back to the
        type's default (0, 0.0 or None).

        :param column | <orb.Column>
               value | <str> || None

        :return <variant>
        """
        ftype = column.columnType()
        if ftype == orb.ColumnType.Bool:
            return value == '1'
        elif ftype in (orb.ColumnType.Integer,
                       orb.ColumnType.Enum,
                       orb.ColumnType.ForeignKey):
            try:
                return int(value)
            except (TypeError, ValueError):
                return 0
        elif ftype in (orb.ColumnType.Double, orb.ColumnType.Decimal):
            try:
                return float(value)
            except (TypeError, ValueError):
                return 0.0
        elif ftype == orb.ColumnType.Date:
            try:
                return datetime.datetime.strptime(value, '%Y-%m-%d').date()
            except (TypeError, ValueError):
                return None
        elif ftype == orb.ColumnType.Datetime:
            try:
                return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
            except (TypeError, ValueError):
                return None
        elif ftype == orb.ColumnType.Time:
            try:
                return datetime.datetime.strptime(value, '%H:%M:%S').time()
            except (TypeError, ValueError):
                return None
        return value

    def _select(self, table_or_join, lookup, options):
        """
        Performs the database lookup and returns the raw record information
        for processing.

        :param table_or_join | <subclass of orb.Table> || <orb.Join>
               lookup | <orb.LookupOptions>
               options | <orb.ContextOptions>

        :return [(<orb.TableSchema>, [<dict>, ..]), ..]
        """
        if not self.open():
            return []

        schemas = []

        # initialize a lookup from a table
        if orb.Table.typecheck(table_or_join):
            schemas.append((table_or_join.schema(), lookup.columns))

        # initialize a lookup from a join
        elif orb.Join.typecheck(table_or_join):
            log.warning('Joining is not yet supported for Quickbase')
            return []

        # make sure we have a valid query
        else:
            raise errors.QueryInvalid('Invalid select option: {0}'.format(table_or_join))

        # paging information becomes dotted quickbase option strings
        qb_opts = []
        if lookup.limit:
            qb_opts.append('num-%i' % lookup.limit)
        if lookup.start:
            qb_opts.append('skip-%i' % lookup.start)
        qb_options = '.'.join(qb_opts)

        output = []
        for schema, columns in schemas:
            table_id = schema.dbname()
            field_ids = self._fieldIds(schema)

            # generate the request parameters
            request = {'includeRids': 1}
            if options:
                request['options'] = qb_options

            # collect specific columns
            if lookup.columns is not None:
                fields = [schema.column(c).fieldName() for c in lookup.columns]
                fids = [field_ids.get(x, '0') for x in fields]
                request['clist'] = '.'.join(fids)

            # collect specific entries
            if lookup.where:
                where = lookup.where.expandShortcuts(schema)
                request['query'] = self.queryCommand(schema, where)

            # convert the sorting keys
            if lookup.order:
                sort = []
                direc = 'sortorder-A'
                for col, direction in lookup.order:
                    if direction == 'desc':
                        direc = 'sortorder-D'
                    fid = field_ids.get(schema.column(col).fieldName(), '0')
                    sort.append(fid)
                request['slist'] = '.'.join(sort)

                # bugfix: append to the existing options only when they are
                # present and non-empty; previously this raised a KeyError
                # whenever no context options were supplied
                if request.get('options'):
                    request['options'] += '.%s' % direc
                else:
                    request['options'] = direc

            # collect the records
            xresponse = self.request(table_id + '?a=td', 'DoQuery', request)

            # look for error data
            error_code = xresponse.find('errcode').text
            error_text = xresponse.find('errtext').text
            if error_code != '0':
                raise errors.DatabaseError(error_text)

            # extract the record data
            records = []
            for xrecord in xresponse:
                if not xrecord.tag == 'record':
                    continue

                record = {'_id': int(xrecord.get('rid'))}
                for xfield in xrecord:
                    col = schema.column(xfield.tag)
                    if not col:
                        continue
                    record[col.name()] = self._fromQuickbase(col, xfield.text)

                records.append(record)

            output.append((schema, records))
        return output

    def count(self, table_or_join, lookup, options):
        """
        Returns the count for the results based on the given query.

        :param table_or_join | <subclass of orb.Table> || <orb.Join>
               lookup | <orb.LookupOptions>
               options | <orb.ContextOptions>

        :return <int>
        """
        if not self.open():
            return 0

        # initialize a lookup from a table (joins are not yet supported)
        if not orb.Table.typecheck(table_or_join):
            log.warning('Joining is not yet supported for Quickbase')
            # bugfix: previously returned an empty list from an int method
            return 0

        schema = table_or_join.schema()
        table_id = schema.dbname()

        if not lookup.where:
            xresponse = self.request(table_id + '?a=td', 'GetNumRecords')
            return int(xresponse.find('num_records').text)
        else:
            opts = {}
            where = lookup.where.expandShortcuts(table_or_join)
            opts['query'] = self.queryCommand(schema, where)
            xresponse = self.request(table_id + '?a=td', 'DoQueryCount', opts)
            return int(xresponse.find('numMatches').text)

    def isConnected(self):
        """
        Returns whether or not this connection is established.

        :return <bool>
        """
        return self._ticket is not None

    def open(self):
        """
        Opens a new database connection to the database defined
        by the inputted database.

        :return <bool> success

        :raises <orb.errors.DatabaseNotFound> when no database is assigned
        """
        if not self._database:
            self._failed = True
            raise errors.DatabaseNotFound()
        elif self._ticket:
            # an existing ticket means we are already authenticated
            return True

        user = self._database.username()
        pword = self._database.password()

        request = {'username': user, 'password': pword}
        response = self.request('main',
                                'Authenticate',
                                request,
                                required=['ticket', 'userid'],
                                useTicket=False)

        self._ticket = response['ticket']
        self._qb_user_id = response['userid']
        return True

    def queryCommand(self, schema, query):
        """
        Converts the inputted query object to a Quickbase query command.

        :param schema | <orb.TableSchema> || None
        :param query | <orb.Query>

        :return <str> | query
        """
        if query.isNull():
            log.debug('Quickbase.queryCommand: NULL QUERY.')
            return ''

        # load query compounds
        if orb.QueryCompound.typecheck(query):
            # extract the rest of the query information
            output = []

            # determine the joining operator
            join = ' AND '
            if query.operatorType() == orb.QueryCompound.Op.Or:
                join = ' OR '

            # generate the queries
            for q in query.queries():
                q_str = self.queryCommand(schema, q)
                if q_str:
                    output.append(q_str)

            return join.join(output)

        # load Query objects
        # initialize the field query objects
        if query.table():
            schema = query.table().schema()

        # make sure we have a schema to work with
        elif not schema:
            raise errors.QueryInvalid(query)

        value = query.value()
        dbname = schema.dbname()
        op = query.operatorType()
        colname = query.columnName()
        col = schema.column(colname)

        if not col:
            raise errors.ColumnNotFound(dbname, colname)

        # extract the primary key information
        if orb.Table.recordcheck(value) or orb.View.recordcheck(value):
            value = self.recordCommand(col, value)

        # extract the primary key information for a list of items
        elif type(value) in (list, tuple):
            value = [self.recordCommand(col, entry) for entry in value]
            value = ','.join([str(x) for x in value])

        # extract the primary key information from a record set
        elif isinstance(value, orb.RecordSet):
            field = orb.system.settings.primaryField()
            value = ','.join([str(x) for x in value.values(field)])

        field = col.fieldName()
        try:
            return "{'%s'.%s.'%s'}" % (field, self.OpMap[op], value)
        except KeyError:
            # operators without a Quickbase equivalent render as no-ops
            return ''

    # noinspection PyUnusedLocal
    @staticmethod
    def recordCommand(column, value):
        """
        Converts the inputted value from a record instance to its primary
        key representation, provided the value is a table type.  If not,
        the inputted value is returned unchanged.

        :param column | <orb.Column>
               value | <variant>

        :return <variant>

        :raises <orb.errors.PrimaryKeyNotDefined> for a record with no key
        """
        # pass through values that are not records
        if not (orb.Table.recordcheck(value) or orb.View.recordcheck(value)):
            return value

        pkey = value.primaryKey()
        if not pkey:
            raise errors.PrimaryKeyNotDefined(value)

        # collapse single-entry compound keys to their lone value
        if type(pkey) in (list, tuple, set):
            if len(pkey) == 1:
                pkey = pkey[0]
            else:
                pkey = tuple(pkey)

        return nstr(pkey)

    def request(self,
                dbname,
                action,
                request=None,
                required=None,
                useTicket=True,
                useToken=True):
        """
        Does a Quickbase query based on the inputted action information.

        :param action | <str> | 'API_' will be prepended automatically
               request | {<str> key: <variant> value}
               required | [<str>, ..] || None
               useTicket | <bool>
               useToken | <bool>

        :return <xml.etree.ElementTree.Element> || {<str> field: <str>, ..}

        :raises ConnectionError | ResponseError | UnknownError
        """
        # generate the URL for the database
        url = self.database().host() + '/db/' + dbname

        if request is None:
            request = {}

        # include the ticket if required
        if useTicket:
            request['ticket'] = self._ticket
        if useToken:
            request['apptoken'] = self.database().applicationToken()

        request['encoding'] = 'UTF-8'

        # generate the header and request information
        data = self.buildRequest(**request)
        headers = {
            'Content-Type': 'application/xml',
            'Accept-Charset': 'utf-8',
            'QUICKBASE-ACTION': 'API_' + action,
        }

        # debug the lookup
        log.debug(url)
        log.debug(nstr(headers))
        log.debug(projex.text.encoded(data))

        # create the request
        request = urllib2.Request(url, data, headers)
        try:
            f = urllib2.urlopen(request, timeout=self._timeout)
            response = f.read()
        except urllib2.HTTPError as error:
            try:
                response = error.read()
            except IOError:
                response = None
            raise ConnectionError(nstr(error))
        except urllib2.URLError as error:
            raise ConnectionError(nstr(error))
        except Exception as error:
            raise UnknownError(nstr(error))

        # Parse the response XML
        try:
            # validity check only -- the decoded value is intentionally
            # discarded and the raw bytes handed to the XML parser
            response.decode('utf-8')
        except UnicodeError:
            # Quickbase sometimes returns cp1252 even when asked for utf-8; fix it
            response = response.decode('cp1252').encode('utf-8')

        try:
            parsed = ElementTree.XML(response)
        except SyntaxError as error:
            raise ResponseError(nstr(error))

        # Ensure it's not a QuickBase error
        error_code = parsed.find('errcode')
        if error_code is None:
            raise ResponseError('"errcode" not in response')
        try:
            error_code = int(error_code.text)
        except ValueError:
            raise ResponseError('"errcode" not an integer')

        if error_code != 0:
            error_text = parsed.find('errtext')
            error_text = error_text.text if error_text is not None else '[no error text]'
            raise ResponseError(error_text)

        if required:
            # Build dict of required response fields caller asked for
            values = {}
            for field in required:
                value = parsed.find(field)
                if value is None:
                    err = '"{0}" not in response'.format(field)
                    raise ResponseError(err)
                values[field] = value.text or ''
            return values
        else:
            # Return parsed XML directly
            return parsed

    def select(self, table_or_join, lookup, options):
        """
        Selects the records from the database for the inputted table or join
        instance based on the given lookup and options.

        :param table_or_join | <subclass of orb.Table>
               lookup | <orb.LookupOptions>
               options | <orb.ContextOptions>

        :return [<variant> result, ..]
        """
        raw = self._select(table_or_join, lookup, options)

        output = []
        for schema, results in raw:
            for result in results:
                # remap raw field names to orb column names where possible
                db_result = {}
                for field, value in result.items():
                    col = schema.column(field)
                    if col:
                        db_result[col.name()] = value
                    else:
                        db_result[field] = value
                output.append(db_result)
        return output

    @staticmethod
    def buildRequest(**fields):
        r"""
        Builds a QuickBase request XML with given fields. Fields can be straight
        key=value, or if value is a 2-tuple it represents (attr_dict, value),
        or if value is a list of values or 2-tuples the output will
        contain multiple entries.

        :return <str> | serialized XML document
        """
        request = ElementTree.Element('qdbapi')

        # noinspection PyShadowingNames
        def add_sub_element(field, value):
            # 2-tuples carry an attribute dictionary along with the value
            if isinstance(value, tuple):
                attrib, value = value
                attrib = dict((k, nstr(v)) for k, v in attrib.iteritems())
            else:
                attrib = {}
            sub_element = ElementTree.SubElement(request, field, **attrib)
            sub_element.text = nstr(value)

        # NOTE: iteritems() keeps this module Python 2 only (as does urllib2)
        for field, values in fields.iteritems():
            if not isinstance(values, list):
                values = [values]
            for value in values:
                add_sub_element(field, value)

        string = ElementTree.tostring(request, encoding='UTF-8')
        return string
# urllib2 only exists on Python 2; skip registering the backend without it
if urllib2:
    orb.Connection.registerAddon('Quickbase', Quickbase)
""" Defines the global environment information for managing databases across. \
Multiple environments. """
import projex.text
import xml.parsers.expat
from projex.lazymodule import lazy_import
from projex.text import nativestring as nstr
from xml.etree import ElementTree
orb = lazy_import('orb')
class Environment(object):
def __init__(self,
name='production',
description='',
referenced=False,
manager=None):
self._name = name
self._description = description
self._database = None
self._databases = {}
self._referenced = referenced
self._default = False
self._manager = manager
def clear(self):
"""
Clears this environment's information.
"""
for db in self._databases.values():
db.disconnect()
def database(self, name=''):
"""
Returns the database with the inputted name for this environment. If \
no database is specifically defined for this environment, then the \
default entry will be returned from the database class.
:sa <orb.Database.byName>
:return <orb.Database> || None
"""
if name:
db = orb.Database.byName(name)
return self._databases.get(nstr(name), db)
else:
return self._database
def databases(self):
"""
Returns a list of all the databases in this environment.
:return [<orb.Database>, ..]
"""
return self._databases.values()
def description(self):
"""
Returns the description of this environment.
:return <str>
"""
return self._description
def defaultDatabase(self):
"""
Returns the database that is the default for this environment.
:return <orb.Database> || None
"""
# use the default database from the list
first = None
for db in self._databases.values():
if not first:
first = db
if db.isDefault():
return db
# use the current database from the orb system
db = orb.system.database()
if not db:
db = first
return db
def isDefault(self):
"""
Returns whether or not this environment is the default environment.
:return <bool>
"""
return self._default
def isReferenced(self):
"""
Returns whether or not this environment is referenced from a
separate ORB file.
:return <bool>
"""
return self._referenced
def manager(self):
"""
Returns the manager associated with this environment. If
no specific manager has been assigned, than the global orb
system manager will be returned.
:return <orb.Manager>
"""
if not self._manager:
return orb.system
else:
return self._manager
def name(self):
"""
Returns the name of this environment.
:return <str>
"""
return self._name
def registerDatabase(self, database, active=False):
"""
Registers a particular database with this environment.
:param database | <orb.Database>
"""
self._databases[database.name()] = database
if active or not self._database:
self._database = database
def save(self, filename):
"""
Saves the environment out to the inputted filename.
:param filename | <str>
"""
# create the orb information
import orb
elem = ElementTree.Element('orb')
elem.set('version', orb.__version__)
envs = ElementTree.SubElement(elem, 'environments')
self.toXml(envs)
projex.text.xmlindent(elem)
env_file = open(filename, 'w')
env_file.write(ElementTree.tostring(elem))
env_file.close()
def setCurrent(self):
"""
Sets this environment as the current database environment.
:return <bool> | changed
"""
self.manager().setEnvironment(self)
def setDatabase(self, database):
"""
Sets the active database to the inputted database.
:param database | <orb.Database>
"""
self._database = database
def setDescription(self, description):
"""
Sets the description for this environment to the inputted description.
:param description | <str>
"""
self._description = description
def setDefault(self, state):
"""
Sets this environment to the default environment.
:param state | <bool>
:return <bool> | changed
"""
if self._default == state:
return False
self._default = state
if state:
for env in self.manager().environments():
if not env.isDefault():
continue
env._default = False
break
return True
def setName(self, name):
"""
Sets the name for this environment to the inputted name.
:param name | <str>
"""
self._name = nstr(name)
def toXml(self, xparent):
"""
Converts this environment to XML data and returns it.
:param xparent | <xml.etree.ElementTree.Element>
"""
xenv = ElementTree.SubElement(xparent, 'environment')
xenv.set('name', nstr(self.name()))
xenv.set('default', nstr(self.isDefault()))
ElementTree.SubElement(xenv, 'description').text = self.description()
xdbs = ElementTree.SubElement(xenv, 'databases')
for db in self._databases.values():
db.toXml(xdbs)
return xenv
def unregisterDatabase(self, database):
"""
Un-registers a particular database with this environment.
:param database | <orb.Database>
"""
if database.name() in self._databases:
database.disconnect()
self._databases.pop(database.name())
@staticmethod
def current(manager=None):
"""
Returns the current environment for the orb system.
:param manager | <orb.Manager> || None
:return <orb.Environment> || None
"""
if manager is None:
manager = orb.system
return manager.database()
@staticmethod
def fromXml(xenv, referenced=False):
    """
    Restores a new environment instance from the inputted XML element.

    :param xenv       | <xml.etree.ElementTree.Element>
           referenced | <bool>

    :return <Environment>
    """
    env = Environment(referenced=referenced)
    env.setName(xenv.get('name', ''))
    env.setDefault(xenv.get('default') == 'True')

    xdesc = xenv.find('description')
    if xdesc is not None:
        env.setDescription(xdesc.text)

    # restore each serialized database definition
    xdbs = xenv.find('databases')
    if xdbs is not None:
        for xdb in xdbs:
            env.registerDatabase(orb.Database.fromXml(xdb, referenced))

    return env
@staticmethod
def load(filename):
"""
Loads the environments defined within the inputted filename.
:param filename | <str>
:return <Environment> || None
"""
try:
xtree = ElementTree.parse(filename)
except xml.parsers.expat.ExpatError:
return None
# create a new database
return Environment.fromXml(xtree.getroot())
@staticmethod
def findDefault(manager=None):
"""
Returns the default environment, if any is set to default.
:param manager | <orb.Manager> || None
:return <orb.Environment> || None
"""
if manager is None:
manager = orb.system
for env in manager.environments():
if env.isDefault():
return env
return None
"""
Defines the different options that can be used throughout the system. Often,
classes and methods will accept a variable set of keyword arguments. As
opposed to hard-coding these options everywhere and updating them, they
will map to one of the classes defined in this module.
"""
from collections import OrderedDict
from projex.text import nativestring as nstr
from projex.lazymodule import lazy_import
import projex.rest
orb = lazy_import('orb')
class Options(object):
    """
    Base class for bundles of keyword options passed between methods in
    the connection layer.

    Subclasses declare a ``DEFAULTS`` dictionary mapping each supported
    option name to its default value; constructor keywords outside of
    ``DEFAULTS`` are silently ignored. Values are exposed both through
    the ``raw_values`` dictionary and as attributes.
    """
    DEFAULTS = {}

    def __init__(self, **kwds):
        # merge values from another option bundle when supplied via the
        # 'options' keyword (explicitly passed keywords still win).
        # GENERALIZED: previously only ContextOptions instances were
        # honored here; any Options subclass now works the same way
        other_options = kwds.pop('options', None)
        if isinstance(other_options, Options):
            for key, default in self.DEFAULTS.items():
                kwds.setdefault(key, other_options.raw_values.get(key, default))

        # assign through __dict__ directly so the __setattr__ guard
        # below does not reject the initial definition
        self.__dict__['raw_values'] = {}
        for key, value in self.DEFAULTS.items():
            self.raw_values[key] = kwds.get(key, value)

    def __str__(self):
        """
        Returns a string listing only the non-default options.

        :return <str>
        """
        opts = []
        for key, default in self.DEFAULTS.items():
            val = getattr(self, key)
            if val == default:
                continue
            opts.append('{0}:{1}'.format(key, val))
        return '<{0} {1}>'.format(type(self).__name__, ' '.join(opts))

    def __getattr__(self, key):
        # expose raw option values as attributes
        try:
            return self.raw_values[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        # only keys declared in DEFAULTS may be assigned
        if key in self.raw_values:
            self.raw_values[key] = value
        else:
            raise AttributeError(key)

    def __hash__(self):
        """
        Returns a hash representation for this instance.

        :return <hash>
        """
        return hash(nstr(self))

    def assigned(self):
        """
        Returns a dictionary of the options that differ from their defaults.

        :return {<str> key: <variant> value, ..}
        """
        return {k: v for k, v in self.raw_values.items() if self.DEFAULTS[k] != v}

    def copy(self):
        """
        Returns a copy of this option set.

        :return <Options>
        """
        return type(self)(**self.raw_values)

    def isNull(self):
        """
        Returns whether or not every option still holds its default value.

        :return <bool>
        """
        # BUGFIX: the previous implementation compared the results of
        # .values(), which is order-sensitive and (on Python 3) compares
        # view objects that are never equal; compare the mappings instead
        return self.raw_values == dict(self.DEFAULTS)

    def update(self, options):
        """
        Updates this option set with the inputted dictionary, ignoring
        any keys that are not declared in DEFAULTS.

        :param options | <dict>
        """
        self.raw_values.update({k: v for k, v in options.items() if k in self.DEFAULTS})

    def toDict(self):
        """
        Returns a dictionary representation of this option set.

        :return <dict>
        """
        return self.raw_values.copy()

    @classmethod
    def fromDict(cls, data):
        """
        Returns a new option instance based on the inputted data
        dictionary.

        :param data | <dict>

        :return <Options>
        """
        return cls(**data)
class ContextOptions(Options):
    """
    Option bundle describing the context in which a database action is
    run: which database/namespace to use, how output should be rendered,
    locale/timezone information, and debugging flags.
    """
    DEFAULTS = {
        # database options
        'namespace': None,
        'flags': 0,
        'autoIncrement': True,
        'deleteFlags': orb.DeleteFlags.all(),
        'useCache': False,
        'database': None,

        # output options
        'inflated': True,
        'format': 'object',

        # context options
        'context': None,
        'locale': None,
        'timezone': None,
        'request': None,

        # debug options
        'force': False,
        'dryRun': False
    }

    @property
    def database(self):
        """
        Returns the database for this context, falling back on the
        system-wide database when none was explicitly assigned.
        """
        return self.raw_values['database'] or orb.system.database()

    @property
    def locale(self):
        """
        Returns the locale for this context -- the explicitly assigned
        locale when set, otherwise the root manager's locale.

        :return <str>
        """
        return self.raw_values['locale'] or orb.system.locale(self)

    @property
    def timezone(self):
        """
        Returns the timezone for this context -- the explicitly assigned
        zone when set, otherwise the root manager's timezone.

        :return <str>
        """
        return self.raw_values['timezone'] or orb.system.timezone(self)

    def update(self, options):
        """
        Updates this option set with the inputted dictionary. When the
        dictionary carries an 'options' entry, its raw values are merged
        in first so the explicit keys win.

        :param options | <dict>
        """
        merged = {}
        if 'options' in options:
            merged.update(options['options'].raw_values)
        merged.update(options)
        super(ContextOptions, self).update(merged)
# ------------------------------------------------------------------------------
try:
    # string types used for isinstance/type checks below; `unicode`
    # only exists on Python 2
    _string_types = (str, unicode)
except NameError:  # pragma: no cover - Python 3
    _string_types = (str,)


class LookupOptions(object):
    """
    Defines a unique instance of information that will be bundled when
    calling different query based methods in the connection class.

    Accepted keywords:

    :param columns   | [<str>, ..] || None (default: None)
           where     | <orb.Query> || <orb.QueryCompound> || None
           order     | [(<str> column, 'asc'/'desc'), ..] || <str> || None
           expand    | [<str>, ..] || <set> || <dict> || <str> || None
           start     | <int> || None (default: None)
           limit     | <int> || None (default: None)
           distinct  | <bool> (default: False)
           pageSize  | <int> || None (default: None)
           page      | <int> (default: -1, meaning "no paging")
           returning | <str> (default: 'records')
           lookup    | <LookupOptions> | another lookup to merge from
    """

    @staticmethod
    def _flatten_expand(tree):
        """
        Flattens a nested expand dictionary into a flat list of dotted
        paths, e.g. {'a': {'b': {}}} -> ['a', 'a.b'].
        """
        def expand_string(key, children):
            return [key] + [key + '.' + child
                            for value in [expand_string(k_, v_) for k_, v_ in children.items()]
                            for child in value]
        return [entry for item in [expand_string(k, v) for k, v in tree.items()] for entry in item]

    @staticmethod
    def _parse_order(text):
        """
        Parses a comma separated order string such as '-name,rank' into
        [('name', 'desc'), ('rank', 'asc')] pairs.
        """
        return [(x.strip('+-').strip(), 'desc' if x.startswith('-') else 'asc')
                for x in text.split(',') if x]

    def __init__(self, **kwds):
        columns = kwds.get('columns') or []
        where = kwds.get('where') or None
        order = kwds.get('order') or []
        expand = kwds.get('expand') or []

        # normalize the expand option into a flat list of dotted paths
        if type(expand) == set:
            expand = list(expand)
        elif type(expand) == dict:
            expand = self._flatten_expand(expand)
        elif type(expand) in _string_types:
            expand = expand.split(',')

        # normalize the order option into (column, direction) pairs
        if type(order) == set:
            order = list(order)
        if isinstance(order, _string_types):
            order = self._parse_order(order)

        # merge values from another lookup when provided; explicitly
        # passed values still take precedence
        if isinstance(kwds.get('lookup'), LookupOptions):
            other = kwds['lookup']
            columns += [col for col in other.columns or [] if col not in columns]

            # update where
            if where is not None:
                where &= other.where
            elif other.where is not None:
                where = other.where
            else:
                where = None

            # update order / expansion
            order = order or list(other.order or [])
            expand = expand or list(other.expand or [])

            kwds.setdefault('start', other.start)
            kwds.setdefault('limit', other.limit)
            kwds.setdefault('distinct', other.distinct)
            kwds.setdefault('pageSize', other.pageSize)
            kwds.setdefault('page', other.page)
            kwds.setdefault('returning', other.returning)

        self.columns = columns or None
        self.where = where
        self.order = order or None
        self.expand = expand or None
        self._start = kwds.get('start', None)
        self._limit = kwds.get('limit', None)
        self.distinct = kwds.get('distinct', False)
        self.pageSize = kwds.get('pageSize', None)
        self.page = kwds.get('page', -1)
        self.returning = kwds.get('returning', 'records')

    def __str__(self):
        """
        Returns a string for this instance listing the assigned options.

        :return <str>
        """
        opts = []
        for key in ('columns',
                    'where',
                    'order',
                    '_start',
                    '_limit',
                    'pageSize',
                    'page'):
            val = getattr(self, key)
            if val is None:
                continue
            # queries are represented by their hash so the string is stable
            if orb.Query.typecheck(val):
                val = hash(val)
            opts.append('{0}:{1}'.format(key, val))
        if self.distinct:
            opts.append('distinct:True')
        return '<LookupOptions {0}>'.format(' '.join(opts))

    def __hash__(self):
        """
        Returns a hash representation for this instance.

        :return <hash>
        """
        return hash(nstr(self))

    def copy(self):
        """
        Returns a deep-ish copy of this lookup option set (lists are
        copied, the where query is copied).

        :return <LookupOptions>
        """
        return LookupOptions(
            columns=self.columns[:] if self.columns else None,
            where=self.where.copy() if self.where is not None else None,
            order=self.order[:] if self.order else None,
            start=self._start,
            limit=self._limit,
            distinct=self.distinct,
            expand=self.expand[:] if self.expand else None,
            page=self.page,
            pageSize=self.pageSize,
            returning=self.returning,
        )

    def expandtree(self):
        """
        Returns a dictionary of nested expansions for this option set.
        This will inflate the dot noted paths for each expanded column.

        :return <dict>
        """
        if not self.expand:
            return {}

        def build_tree(tree, name):
            name, _, remain = name.partition('.')
            tree.setdefault(name, {})
            if remain:
                build_tree(tree[name], remain)

        output = {}
        for path in self.expand:
            build_tree(output, path)
        return output

    def isNull(self):
        """
        Returns whether or not this lookup option set carries no
        restrictions at all.

        :return <bool>
        """
        # BUGFIX: `page` defaults to -1 (truthy), which previously made
        # this method return False even for a freshly created, empty
        # lookup -- treat the -1 sentinel as unset
        if self.page != -1:
            return False
        for key in ('columns',
                    'where',
                    'order',
                    '_start',
                    '_limit',
                    'distinct',
                    'expand',
                    'pageSize'):
            if getattr(self, key):
                return False
        return True

    def schemaColumns(self, schema):
        """
        Resolves this lookup's column names against the inputted schema,
        preserving order while dropping duplicates.

        :param schema | <orb.TableSchema>
        """
        return OrderedDict([(schema.column(col), 1) for col in self.columns]).keys()

    @property
    def limit(self):
        # the page size takes precedence over an explicit limit
        return self.pageSize or self._limit

    @limit.setter
    def limit(self, limit):
        self._limit = limit

    @property
    def start(self):
        # derive the offset from the paging information when paging is on
        if self.page > 0 and self.pageSize is not None:
            # noinspection PyTypeChecker
            return self.pageSize * (self.page - 1)
        return self._start

    @start.setter
    def start(self, start):
        self._start = start

    # noinspection PyProtectedMember
    def update(self, options):
        """
        Updates this lookup set with the inputted options.

        :param options | <dict>
        """
        if 'lookup' in options:
            other = options['lookup']
            options.setdefault('where', other.where)
            options.setdefault('columns', other.columns)
            options.setdefault('order', other.order)
            options.setdefault('start', other._start)
            options.setdefault('limit', other._limit)
            options.setdefault('expand', other.expand)
            options.setdefault('pageSize', other.pageSize)
            options.setdefault('page', other.page)
            options.setdefault('returning', other.returning)

        columns = self.columns or []
        columns += [col for col in options.get('columns') or [] if col not in columns]

        # update where
        if options.get('where') is not None:
            self.where = options['where'] & self.where

        order = options.get('order') or []
        if isinstance(order, _string_types):
            order = self._parse_order(order)

        expand = options.get('expand') or []
        if type(expand) == dict:
            expand = self._flatten_expand(expand)
        if type(expand) in _string_types:
            expand = expand.split(',')

        self.columns = columns or None

        # BUGFIX: these merges previously read `a or [] + b`, which binds
        # as `a or ([] + b)` and silently dropped the new values whenever
        # the existing list was non-empty
        self.expand = ((self.expand or []) + expand) or None
        self.order = ((self.order or []) + order) or None

        self._start = options.get('start', self._start)
        self._limit = options.get('limit', self._limit)
        self.distinct = options.get('distinct', self.distinct)
        self.pageSize = options.get('pageSize', self.pageSize)
        self.page = options.get('page', self.page)
        self.returning = options.get('returning', self.returning)

    def toDict(self):
        """
        Returns a dictionary representation of the lookup options,
        omitting unset values.

        :return <dict>
        """
        out = {}
        if self.columns:
            out['columns'] = self.columns[:]
        if self.where:
            out['where'] = self.where.toDict()
        if self.order:
            out['order'] = self.order[:]
        # BUGFIX: guard on the raw values rather than the derived
        # start/limit properties so paging alone no longer emits
        # 'start': None / 'limit': None entries
        if self._start:
            out['start'] = self._start
        if self._limit:
            out['limit'] = self._limit
        if self.expand:
            out['expand'] = self.expand[:]
        if self.page != -1:
            out['page'] = self.page
        if self.pageSize:
            out['pageSize'] = self.pageSize
        if self.returning != 'records':
            out['returning'] = self.returning
        return out

    def toXml(self, xparent=None):
        raise NotImplementedError

    @staticmethod
    def fromDict(data):
        """
        Returns a new lookup options instance based on the inputted data
        dictionary.

        :param data | <dict>

        :return <LookupOptions>
        """
        kwds = {}
        kwds.update(data)
        if 'where' in data:
            kwds['where'] = orb.Query.fromDict(data['where'])
        return LookupOptions(**kwds)

    @staticmethod
    def fromXml(xdata):
        raise NotImplementedError

    @staticmethod
    def fromJSON(jdata):
        """
        Restores a LookupOptions item from the JSON dataset.

        :param jdata | <dict> || <str>

        :return <orb.LookupOptions>
        """
        if type(jdata) in _string_types:
            jdata = projex.rest.unjsonify(jdata)

        # restore the query data if applicable
        if 'where' in jdata:
            jdata['where'] = orb.Query.fromJSON(jdata['where'])
        return orb.LookupOptions(**jdata)
""" Defines the Transaction class to handle multiple database transactions """
import threading
from collections import defaultdict
from projex.locks import ReadWriteLock, ReadLocker, WriteLocker
class Transaction(object):
    """
    Tracks a per-thread stack of active transactions so that database
    connections touched while a transaction is open are committed or
    rolled back together.

    Usage::

        with Transaction():
            ...  # records saved in here share one commit
    """
    # per-thread stacks of active transactions, guarded by _stackLock
    _stack = defaultdict(list)
    _stackLock = ReadWriteLock()

    def __init__(self, dryRun=False):
        # when dryRun is True, all work is rolled back on exit
        self._dryRun = dryRun
        self._connections = set()
        self._errors = []

    def __enter__(self):
        self.begin()
        # BUGFIX: return self so `with Transaction() as txn:` binds the
        # transaction instead of None
        return self

    # noinspection PyUnusedLocal
    def __exit__(self, typ, error, traceback):
        if self._dryRun:
            self.cancel()
        else:
            if error:
                self._errors.append(error)
            self.end()

    def cancel(self):
        """
        Cancels all the current submissions by rolling back every dirty
        connection.
        """
        for connection in self._connections:
            connection.rollback()

    def commit(self):
        """
        Commits the changes for this transaction, rolling back any
        individual connection whose commit fails.
        """
        for connection in self._connections:
            try:
                connection.commit()
            # BUGFIX: StandardError is Python-2-only (removed in Python 3);
            # Exception covers the same failures here
            except Exception:
                connection.rollback()

    def begin(self):
        """
        Begins a new transaction for this instance, clearing any prior
        state and pushing it onto the thread-local stack.
        """
        self._connections.clear()
        self._errors = []
        Transaction.push(self)

    def isErrored(self):
        """
        Returns whether or not this transaction has an errored state.

        :return <bool>
        """
        return len(self._errors) > 0

    def errors(self):
        """
        Returns the errors that occurred during this transaction.

        :return [<subclass of Exception>, ..]
        """
        return self._errors

    def end(self, threadId=None):
        """
        Commits all the changes for the database connections that have
        been processed while this transaction is active, or rolls them
        back when errors were recorded.
        """
        # remove the transaction first, or the connection will not commit properly
        self.pop(self, threadId)
        if not self.isErrored():
            self.commit()
        else:
            for connection in self._connections:
                connection.rollback()

    def rollback(self, error):
        """
        Rolls back the changes to the dirty connections for this instance.

        :param error | <subclass of Exception>
        """
        self._errors.append(error)
        for connection in self._connections:
            connection.rollback()

    def setDirty(self, connection):
        """
        Mark a given connection as being dirty. The connection class will
        check the transaction information before committing any database
        code. If there is an active transaction, then the connection
        will mark itself as dirty for this transaction.

        :param connection | <orb.Connection>
        """
        self._connections.add(connection)

    @staticmethod
    def current(threadId=None):
        """
        Returns the active (top-most) transaction for the given thread.

        :param threadId | <int> || None | defaults to the calling thread

        :return <Transaction> || None
        """
        threadId = threadId or threading.current_thread().ident
        with ReadLocker(Transaction._stackLock):
            stack = Transaction._stack.get(threadId)
            return stack[-1] if stack else None

    @staticmethod
    def push(transaction, threadId=None):
        """
        Pushes a new transaction onto the thread's stack.

        :param transaction | <Transaction>
        """
        threadId = threadId or threading.current_thread().ident
        with WriteLocker(Transaction._stackLock):
            Transaction._stack[threadId].append(transaction)

    @staticmethod
    def pop(transaction=None, threadId=None):
        """
        Removes the given transaction (or the top-most one when omitted)
        from the thread's stack; returns None when nothing was removed.
        """
        threadId = threadId or threading.current_thread().ident
        with WriteLocker(Transaction._stackLock):
            if transaction:
                try:
                    Transaction._stack[threadId].remove(transaction)
                except (KeyError, ValueError):
                    return None
            else:
                try:
                    Transaction._stack[threadId].pop()
                except IndexError:
                    return None
"""
Defines methods for aggregation within the database system.
"""
from .converter import DataConverter
from .join import Join
from .piperecordset import PipeRecordSet
from .recordset import RecordSet
from .store import DataStore
""" [desc] """
import logging
from projex.addon import AddonManager
log = logging.getLogger(__name__)
class DataConverter(AddonManager):
    """
    Handles safe mapping of record values to database values through a
    chain of registered converter addons.
    """
    def convert(self, value):
        """
        Converts the inputted value to a standard Python value; the base
        implementation is a simple pass-through.

        :param value | <variant>

        :return <variant>
        """
        return value

    @staticmethod
    def toPython(value):
        """
        Runs the value through every registered converter addon in turn,
        yielding a basic Python value that can be wrapped for the database.

        :param value | <variant>

        :return <variant>
        """
        result = value
        for converter in DataConverter.addons().values():
            result = converter.convert(result)
        return result
# import the data converter plugin classes and register the whole module
# of addons with the base DataConverter class
from .converters import __plugins__
DataConverter.registerAddonModule(__plugins__)
# documentation-generation hooks: do not recurse into subpackages, and
# only list the named module in the generated table of contents
__recurse__ = False
__toc__ = [r'orb.data.converters.qt']
""" Defines a mapper for converting Qt information to and from the database. """
import binascii
import logging
from projex.text import nativestring as nstr
from projex.lazymodule import lazy_import
from ..converter import DataConverter
log = logging.getLogger(__name__)
QtCore = lazy_import('xqt.QtCore')
class QDataConverter(DataConverter):
    """
    Maps Qt values to standard python values.

    Dispatch is done on the *type name* of the value so this module never
    has to import Qt eagerly; QtCore is a lazy import resolved only when
    a QIcon/QPixmap actually needs to be serialized.
    """
    def convert(self, value):
        """
        Converts the inputted Qt value to a plain Python value; values
        that are not recognized Qt types fall through to the base
        converter unchanged.

        :param value | <variant>

        :return <variant>
        """
        val_name = type(value).__name__
        if val_name == 'QString':
            return nstr(value.toUtf8())
        elif val_name == 'QVariant':
            return value.toPyObject()
        elif val_name == 'QDate':
            return value.toPyDate()
        elif val_name == 'QDateTime':
            return value.toPyDateTime()
        elif val_name == 'QTime':
            return value.toPyTime()
        elif val_name == 'QColor':
            # store colors by their name string
            return value.name()
        elif val_name == 'QIcon':
            try:
                # render the icon to an in-memory PNG and base64-encode it
                pixmap = value.pixmap(value.actualSize(QtCore.QSize(256, 256)))
                arr = QtCore.QByteArray()
                buf = QtCore.QBuffer()
                buf.setBuffer(arr)
                buf.open(QtCore.QBuffer.WriteOnly)
                pixmap.save(buf, 'PNG')
                buf.close()
                return binascii.b2a_base64(nstr(arr))
            except ImportError:
                # QtCore is lazily imported; presumably attribute access
                # raises ImportError when xqt is unavailable -- in that
                # case we log and fall through to the default conversion
                log.error('Cannot store QIcon, xqt.QtCore not defined.')
        elif val_name == 'QPixmap':
            try:
                # same in-memory PNG + base64 round trip as QIcon above
                arr = QtCore.QByteArray()
                buf = QtCore.QBuffer()
                buf.setBuffer(arr)
                buf.open(QtCore.QBuffer.WriteOnly)
                value.save(buf, 'PNG')
                buf.close()
                return binascii.b2a_base64(nstr(arr))
            except ImportError:
                log.error('Cannot store QPixmap, xqt.QtCore not defined.')
        return super(QDataConverter, self).convert(value)
# register the Qt data converter instance as a named addon on the base class
DataConverter.registerAddon('Qt', QDataConverter())
""" Defines the Join class used when querying multiple tables. """
import logging
from projex.lazymodule import lazy_import
log = logging.getLogger(__name__)
orb = lazy_import('orb')
errors = lazy_import('orb.errors')
class Join(object):
    """
    Builds a joined database lookup, returning records from more than
    one table on a single select. Options are either Table subclasses
    (yielding inflated records) or Query instances naming a specific
    (table, column) pair (yielding just that column's value).

    :usage |>>> from orb import Join as J, Query as Q
           |>>> user = User.byFirstAndLastName('Eric','Hulser')
           |>>> q = Q(Role,'user')==user
           |>>> q &= Q(Role,'primary') == True
           |>>> q &= Q(Role,'department')==Q(Department)
           |>>> J(Role,Department).selectFirst( where = q )
           |(<Role>,<Department>)
           |>>> q = Q(Role,'user')==user
           |>>> q &= Q(Role,'department')==Q(Department)
           |>>> J(Q(Department,'name'),Q(Role,'primary')).select( where = True )
           |[('Modeling',False),('Rigging',True)]
    """
    def __init__(self, *table_or_queries, **options):
        """
        Joins together at least one Table class to be looked up from the
        database. A Query of a Table and columnName may be supplied
        instead to return only that column's value rather than the
        inflated record.

        :param *table_or_queries | (<subclass of Table> || <Query>, ..)
        :param **options         | db | <orb.Database>
        """
        self._options = list(table_or_queries)
        self._database = options.get('db')

    def addOption(self, table_or_query):
        """
        Adds a new option to this join for lookup.

        :param table_or_query | <subclass of orb.Table> || <orb.Query>
        """
        self._options.append(table_or_query)

    def database(self):
        """
        Returns the database instance linked to this join, falling back
        on the join options and finally the global Orb database.

        :return <orb.Database> || None
        """
        if self._database:
            return self._database

        # use the first database found through the join options
        # NOTE(review): isinstance() is used here although options are
        # usually Table *classes* (tables() below uses typecheck) --
        # confirm whether this branch can ever match
        for option in self.options():
            if isinstance(option, orb.Table):
                return option.database()

        from orb import Orb
        return Orb.instance().database()

    def options(self):
        """
        Returns the options that this join is using.

        :return [ <subclass of Table> || <Query>, .. ]
        """
        return self._options

    def selectFirst(self, **kwds):
        """
        Selects the first matching result for this join from the
        database. Any member value for either <orb.LookupOptions> or
        <orb.ContextOptions> may be supplied as a keyword, as well as
        'lookup' / 'options' instances and 'db'.

        :return (<variant>, ..)

        :raises errors.DatabaseNotFound when no database is available
        """
        lookup = orb.LookupOptions(**kwds)
        options = orb.ContextOptions(**kwds)

        db = kwds.get('db') or self.database()
        if not db:
            raise errors.DatabaseNotFound()
        return db.backend().selectFirst(self, lookup, options)

    def select(self, **kwds):
        """
        Selects all matching results for this join from the database.
        Accepts the same keywords as selectFirst.

        :return [ ( <variant>, .. ), .. ]

        :raises errors.DatabaseNotFound when no database is available
        """
        lookup = orb.LookupOptions(**kwds)
        options = orb.ContextOptions(**kwds)

        db = kwds.get('db') or self.database()
        if not db:
            raise errors.DatabaseNotFound()
        return db.backend().select(self, lookup, options)

    def setDatabase(self, database):
        """
        Sets the database instance that this join will use.

        :param database | <orb.Database>
        """
        self._database = database

    def tables(self):
        """
        Returns the list of tables used in this join instance, expanding
        the tables referenced by any query options.

        :return [<orb.Table>, ..]
        """
        found = []
        for option in self._options:
            if orb.Table.typecheck(option) or orb.View.typecheck(option):
                found.append(option)
            else:
                found += option.tables()
        return found

    @staticmethod
    def typecheck(obj):
        """
        Returns whether or not the inputted object is a Join instance.

        :param obj | <variant>

        :return <bool>
        """
        return isinstance(obj, Join)
""" Defines an piping system to use when accessing multi-to-multi records. """
from projex.lazymodule import lazy_import
from orb import errors
from .recordset import RecordSet
orb = lazy_import('orb')
class PipeRecordSet(RecordSet):
    """
    Record set specialized for piped (many-to-many) relationships: the
    visible records live in a target table, while membership is tracked
    through an intermediate "pipe" table that links a source column to a
    target column.
    """
    def __init__(self,
                 records,
                 source,
                 pipeTable=None,
                 sourceColumn='',
                 targetColumn=''):
        super(PipeRecordSet, self).__init__(records, sourceColumn=sourceColumn, source=source)

        # define additional properties
        self._pipeTable = pipeTable          # intermediate table holding the link records
        self._targetColumn = targetColumn    # pipe column referencing the target record

    def createRecord(self, values, **options):
        """
        Creates (or links) a record for this set. When the values carry
        the target column, the existing record it references is linked
        in; otherwise a brand new target record is created and linked.

        :param values | <dict>

        :return <orb.Table>
        """
        # link an existing column to this recordset
        if self._targetColumn in values:
            record = self.table()(values[self._targetColumn])
            self.addRecord(record)
            return record

        # otherwise, create a new record based on the target table and link it
        else:
            record = self.table().createRecord(values, **options)
            self.addRecord(record)
            return record

    def addRecord(self, record, **options):
        """
        Adds a new record for this pipe set. The inputted record should refer
        to the value for the target column and be an instance of the
        target type.

        :param record | <orb.Table>

        :return <orb.Table> || None | instance of the pipe table

        :raises errors.OrbError when the record/table are invalid or no
                pipe table is defined
        :raises errors.DuplicateEntryFound when uniqueRecord (default) is
                requested and the link already exists
        """
        # make sure we have a valid table and record
        table = self.table()
        if not (table and record and record.isRecord() and isinstance(record, table)):
            raise errors.OrbError('Invalid arguments for creating a record.')

        # make sure we have a pipe table
        if not self._pipeTable:
            raise errors.OrbError('No pipe table defined for {0}'.format(self))

        pipe = self._pipeTable
        unique = options.pop('uniqueRecord', True)
        if unique:
            # reject the link when an identical source/target pair exists
            q = orb.Query(pipe, self.sourceColumn()) == self.source()
            q &= orb.Query(pipe, self._targetColumn) == record
            if pipe.selectFirst(where=q):
                msg = 'A record already exists for {0} and {1}'.format(self.sourceColumn(), self._targetColumn)
                raise errors.DuplicateEntryFound(msg)

        # create the link record in the pipe table
        values = {self.sourceColumn(): self.source(), self._targetColumn: record}
        return pipe.createRecord(values, **options)

    def clear(self, **options):
        """
        Clears all the records through the pipeset based on the inputted
        parameters. Extra keywords become additional equality filters on
        the pipe table.

        :return <int> | number of link records removed
        """
        pipe = self._pipeTable
        if not pipe:
            return 0

        q = orb.Query(pipe, self.sourceColumn()) == self.source()

        # fold in an optional extra where clause
        # NOTE(review): `where` may be None here; presumably Query.__iand__
        # treats None as a no-op -- confirm
        where = options.pop('where', None)
        q &= where
        for key, value in options.items():
            q &= orb.Query(pipe, key) == value

        return pipe.select(where=q).remove()

    def hasRecord(self, record, **options):
        """
        Checks to see if the given record is in the record set for this
        instance.

        :param record | <orb.Table>

        :return <bool>
        """
        table = self.table()
        if not (table and record and record.isRecord() and isinstance(record, table)):
            return False

        # combine the set's base query with an identity match on the record
        where = self.query() & (orb.Query(table) == record)
        return table.selectFirst(where=where) is not None

    def removeRecord(self, record, **options):
        """
        Removes the record from this record set and from the database based
        on the piping information.

        :param record | <orb.Table>

        :return <int> | number of records removed
        """
        table = self.table()
        if not (table and record and record.isRecord() and isinstance(record, table)):
            return 0

        pipe = self._pipeTable
        if not pipe:
            return 0

        # uniqueRecord is accepted (and discarded) for API symmetry with addRecord
        options.pop('uniqueRecord', True)
        q = orb.Query(pipe, self.sourceColumn()) == self.source()
        q &= orb.Query(pipe, self._targetColumn) == record
        for key, value in options.items():
            q &= orb.Query(pipe, key) == value

        # remove the link record (not the target record itself)
        record = pipe.select(where=q).first()
        if record is not None:
            return record.remove()
        else:
            return 0

    def remove(self, **options):
        """
        Removes all the links within this set. This will remove
        from the Pipe table and not from the source record.
        """
        pipe = self._pipeTable
        if not pipe:
            return 0

        q = orb.Query(pipe, self.sourceColumn()) == self.source()
        for key, value in options.items():
            q &= orb.Query(pipe, key) == value

        return pipe.select(where=q).remove()

    def update(self, **values):
        """
        Replaces the linked records for this pipe from either a list of
        ids ('ids') or a list of record dictionaries ('records').

        :return (<int> added, <int> removed)

        :raises errors.OrbError for any other parameter combination
        """
        if 'ids' in values:
            return self.setRecords(values['ids'])
        elif 'records' in values:
            return self.setRecords([x.get('id') for x in values['records'] if x.get('id')])
        else:
            raise errors.OrbError('Invalid update parameters: {0}'.format(values))

    def setRecords(self, records):
        """
        Updates the linked records for this pipe by removing records not within the inputted
        record set and adding any missing ones.

        :param records | <orb.RecordSet> || [<orb.Table>, ..] || [<int>, ..]

        :return <int> added, <int> removed
        """
        pipe = self._pipeTable
        if not pipe:
            raise orb.errors.TableNotFound(self)
        elif not isinstance(records, (orb.RecordSet, list, tuple)):
            records = [records]

        # determine the records to sync by diffing current vs. requested ids
        curr_ids = self.ids()
        record_ids = records.ids() if isinstance(records, orb.RecordSet) else [int(r) for r in records if r]
        remove_ids = set(curr_ids) - set(record_ids)
        add_ids = set(record_ids) - set(curr_ids)

        # remove old records
        if remove_ids:
            q = orb.Query(pipe, self.sourceColumn()) == self.source()
            q &= orb.Query(pipe, self._targetColumn).in_(remove_ids)
            pipe.select(where=q).remove()

        # create new records
        if add_ids:
            # noinspection PyShadowingBuiltins
            rset = orb.RecordSet([pipe(**{self.sourceColumn(): self.source(), self._targetColumn: id})
                                  for id in add_ids])
            rset.commit()

        return len(add_ids), len(remove_ids)
"""
Creates a class for manipulating groups of records at once.
This is useful when you need to remove a lot of records at one time, and is the
return result from the select mechanism that supports paging.
"""
# ------------------------------------------------------------------------------
import logging
import projex.iters
import projex.rest
import re
from collections import defaultdict
from projex.lazymodule import lazy_import
from xml.etree import ElementTree
from ..common import SearchMode
log = logging.getLogger(__name__)
orb = lazy_import('orb')
errors = lazy_import('orb.errors')
# ------------------------------------------------------------------------------
class RecordSetIterator(object):
    """
    Iterates a RecordSet lazily by fetching one page of records at a
    time from the backing set.

    :param rset  | <orb.RecordSet>
           batch | <int> | number of records fetched per page
    """
    def __init__(self, rset, batch=1):
        self._rset = rset
        self._page = 1        # next page to fetch (1-based)
        self._index = -1      # index into the current batch
        self._pageSize = batch
        self._records = []    # records of the current batch

    def __iter__(self):
        return self

    def next(self):
        """
        Returns the next record, fetching the following page from the
        record set when the current batch is exhausted.

        :return <orb.Table>

        :raises StopIteration when no records remain
        """
        self._index += 1

        # get the next batch of records
        if len(self._records) <= self._index:
            self._records = list(self._rset.page(self._page, self._pageSize))
            self._page += 1
            self._index = 0

        # stop the iteration when complete
        if not self._records:
            raise StopIteration()
        else:
            return self._records[self._index]

    # BUGFIX: Python 3's iteration protocol calls __next__; alias it so
    # the iterator works under both interpreter major versions
    __next__ = next
class RecordSet(object):
"""
Defines a class that can be used to manipulate table records in bulk. For
more documentation on the RecordSet, check out the
[[$ROOT/recordsets|record set documentation]].
"""
def __json__(self, *args):
"""
Returns this record set as a list of records.
:return [<orb.Record>, ..]
"""
return self.json()
def __init__(self, *args, **kwds):
self._table = None
self._sourceColumn = kwds.get('sourceColumn')
self._source = kwds.get('source')
# default options
self._cache = defaultdict(dict)
self._grouped = False
# sorting options
self._sort_cmp_callable = None
self._sort_key_callable = None
self._sort_reversed = False
# select information
self._database = -1
self._groupBy = None
self._lookupOptions = orb.LookupOptions(**kwds)
self._contextOptions = orb.ContextOptions(**kwds)
# join another record set as RecordSet(recordSet)
if args:
data = args[0]
if RecordSet.typecheck(data):
self.duplicate(data)
# join a list of records as RecordSet([record, record, record])
elif type(data) in (list, tuple):
self._cache['records'][None] = data[:]
if data and (orb.Table.recordcheck(data[0]) or orb.View.recordcheck(data[0])):
self._table = type(data[0])
# assign a table as the record set RecordSet(orb.Table) (blank record set for a given table)
elif orb.Table.typecheck(data) or orb.View.typecheck(data):
self._table = data
# define a blank recordset
if len(args) == 1:
self._cache['records'][None] = []
# define a cache for this recordset
elif type(args[1]) in (list, tuple):
self._cache['records'][None] = args[1][:]
# define a recordset that should lookup in the future
elif args[1] is None:
pass
def __len__(self):
"""
The length of this item will be its total count.
:return <int>
"""
# return the length of the all records cache
if None in self._cache['records']:
return len(self._cache['records'][None])
# return 0 for null record sets
return 0 if self.isNull() else self.count()
def __iter__(self):
"""
Return the fully resolved list of data.
:return <list>
"""
records = self.records()
for record in records:
yield record
def __add__(self, other):
"""
Adds two record set instances together and returns them.
:param other | <RecordSet> || <list>
"""
return self.union(other)
def __sub__(self, other):
    """
    Returns the difference of this record set and the other set or list.

    :param other | <RecordSet> || <list>
    """
    return self.difference(other)
def __getitem__(self, value):
    """
    Returns the item at the inputted index for this record set.

    :param value | <slice> || <int>

    :return <orb.RecordSet> || <orb.Table>
    """
    # slice up this record set based on the inputted information
    if type(value) == slice:
        rset = RecordSet(self)

        # set the start value for this record set (relative to any
        # existing start offset)
        if value.start is not None:
            if self.start() is not None:
                rset.setStart(self.start() + value.start)
            else:
                rset.setStart(value.start)

        # set the limit value for this record set
        if value.stop is not None:
            if value.start is not None:
                rset.setLimit(value.stop - value.start)
            else:
                rset.setLimit(value.stop)

        # slice up the records when they are already loaded
        # NOTE(review): slice.step is only honored on this pre-loaded
        # cache path, never in the database lookup -- confirm intended
        if None in rset._cache['records']:
            rset._cache['records'][None] = rset._cache['records'][None][value]

        return rset

    # return the record from a given index
    else:
        return self.recordAt(value)
def __nonzero__(self):
    """
    Truthiness hook (Python 2): a record set is truthy when it holds
    at least one record.

    :return <bool>
    """
    return not self.isEmpty()
def _schemaBreakdown(self, **options):
    """
    Returns this recordset broken down by its schema into each record.
    If primaryOnly is set to True, then only the primary keys will be
    grouped together, otherwise full records will be.

    :param primaryOnly | <bool>

    :return {<orb.Table>: <orb.Query>, ..}
    """
    # return the breakdown from the cache
    if not options and None in self._cache['records']:
        output = {}
        for record in self._cache['records'][None]:
            table = type(record)
            output.setdefault(table, set())
            output[table].add(record.primaryKey())

        # convert each table's collected keys into an "in" query
        return {table: orb.Query(table).in_(keys)
                for table, keys in output.items()}
    else:
        # no cache: the current where clause describes the whole set
        lookup = self.lookupOptions(**options)
        return {self.table(): lookup.where}
def records(self, **options):
    """
    Looks up all the records based on the current query information.
    Additional options can include any options available for the
    <orb.LookupOptions> or <orb.ContextOptions> classes that will be passed
    to this recordset backend.

    :return [<orb.Table>, ..]
    """
    key = self.cacheKey(options)

    # return the cached lookups
    try:
        return self._cache['records'][key]
    except KeyError:
        if self.isNull():
            return []

        table = self.table()
        db = self.database()
        lookup = self.lookupOptions(**options)
        ctxt_opts = self.contextOptions(**options)

        # create the lookup information
        cache = table.recordCache()

        # grab the database backend
        backend = db.backend()
        if not backend:
            return []

        # cache the result for this query
        if cache is not None and orb.system.isCachingEnabled():
            results = cache.select(backend, table, lookup, ctxt_opts)
        else:
            results = backend.select(table, lookup, ctxt_opts)

        # inflate raw rows into table instances when requested
        if ctxt_opts.inflated and lookup.returning != 'values':
            output = [self.inflateRecord(table, x) for x in results]
        elif lookup.columns:
            if lookup.returning == 'values':
                cols = lookup.schemaColumns(self.table().schema())
                # a single requested column yields a flat list of values
                if len(cols) == 1:
                    col = cols[0]
                    output = [x[col.fieldName()] for x in results]
                else:
                    output = [[r.get(col.fieldName(), None) for col in cols]
                              for r in results]
            else:
                cols = lookup.schemaColumns(self.table().schema())
                output = [{col.fieldName(): r.get(col.fieldName(), None) for col in cols} for r in results]

        # return the raw results
        else:
            output = results

        self._cache['records'][key] = output

        # return sorted results from an in-place sort
        # NOTE: sorted(cmp=...) is Python 2 only syntax
        if self._sort_cmp_callable is not None or \
           self._sort_key_callable is not None:
            return sorted(output,
                          cmp=self._sort_cmp_callable,
                          key=self._sort_key_callable,
                          reverse=self._sort_reversed)

        return output
def cache(self, section, data, **options):
    """
    Stores (or clears) a cached value for the given section of this
    record set.  The cache key is derived from the inputted options;
    passing None as data removes the cached entry.

    :param section | <str> | records, count, first or last
           data    | <variant> || None
           **options
    """
    key = self.cacheKey(options)
    if data is None:
        self._cache[section].pop(key, None)
    else:
        self._cache[section][key] = data
def cacheKey(self, options):
    """
    Generates the hash key used to cache lookups for the given option
    dictionary.  An empty option set (ignoring 'expand') maps to None.

    :param options | <dict>

    :return <hash> || None
    """
    filtered = {k: v for k, v in options.items() if k != 'expand'}
    if filtered:
        return hash(self.lookupOptions(**filtered))
    return None
def clear(self):
    """
    Flushes every cached lookup result (records, counts, first/last,
    etc.) stored on this record set.
    """
    self._cache.clear()
def commit(self, **options):
    """
    Commits the records associated with this record set to the database.

    :return <bool>
    """
    db = options.pop('db', self.database())
    backend = db.backend()
    lookup = self.lookupOptions(**options)
    db_options = self.contextOptions(**options)
    records = self.records(**options)

    # run each records pre-commit logic before it is inserted to the db
    for record in records:
        record.callbacks().emit('aboutToCommit(Record,LookupOptions,ContextOptions)', record, lookup, db_options)

    # NOTE: relies on Python 2 filter() returning a list (truth-tested
    # and passed to the backend below)
    inserts = filter(lambda x: not x.isRecord(db=db), records)
    updates = filter(lambda x: x.isRecord(db=db), records)

    if inserts:
        backend.insert(inserts, lookup, db_options)
    if updates:
        backend.update(updates, lookup, db_options)

    # update the caching table information
    for table in set([type(record) for record in records]):
        table.markTableCacheExpired()

    # run each records post-commit logic after it is written to the db
    # NOTE(review): this emit passes the raw keyword dict `options`
    # where the pre-commit emit passed `db_options` -- confirm intended
    for record in records:
        record.callbacks().emit('commitFinished(Record,LookupOptions,ContextOptions)', record, lookup, options)

    return True
def count(self, **options):
    """
    Collects the count of records for this record set. Additional options
    can include any options available for the <orb.LookupOptions> or
    <orb.ContextOptions> classes that will be passed to this records
    backend.

    :return <int>
    """
    table = self.table()
    db = self.database()
    key = self.cacheKey(options)

    try:
        return self._cache['count'][key]
    except KeyError:
        # a loaded record cache for the same key gives the count for free
        try:
            return len(self._cache['records'][key])
        except KeyError:
            if self.isNull():
                return 0

            lookup = self.lookupOptions(**options)
            ctxt_opts = self.contextOptions(**options)

            # don't lookup unnecessary data
            lookup.columns = table.schema().primaryColumns()
            lookup.expand = None  # no need to include anything fancy
            lookup.order = None  # it does not matter the order for counting purposes

            # retrieve the count information
            cache = table.recordCache()
            if cache is not None and orb.system.isCachingEnabled():
                count = cache.count(db.backend(), table, lookup, ctxt_opts)
            else:
                count = db.backend().count(table, lookup, ctxt_opts)

            self._cache['count'][key] = count
            return count
def columns(self):
    """
    Returns the list of columns this record set will query for.

    :return [<str>, ..] || None
    """
    return self._lookupOptions.columns
def createRecord(self, values, **options):
    """
    Creates a new record for this recordset. If this set was generated from
    a reverse lookup, then a pointer back to the source record will
    automatically be generated.

    :param values | <dict>

    :return <orb.Table>

    :raises errors.ActionNotAllowed when this set's table is a view
    """
    # views are read-only -- reject before mutating the values dict.
    # (the original used isinstance() against the table *class*, which
    # never matched an orb.View subclass, so the guard could not fire)
    table = self.table()
    if table is not None and orb.View.typecheck(table):
        raise errors.ActionNotAllowed('View tables are read-only.')

    # point the new record back at the source of a reverse lookup
    if self.sourceColumn():
        values.setdefault(self.sourceColumn(), self.source())

    return table.createRecord(values, **options)
def currentPage(self):
    """
    Returns the page number this record set currently represents.

    :return <int>
    """
    return self._lookupOptions.page
def database(self):
    """
    Returns the database instance this record set will query against,
    resolving and caching it on first access (-1 marks "unresolved").

    :return <Database>

    :raises errors.DatabaseNotFound when no database can be resolved
    """
    if self._database != -1:
        return self._database

    table = self.table()
    db = table.getDatabase() if table else orb.system.database()
    if not db:
        raise errors.DatabaseNotFound()

    self._database = db
    return db
def contextOptions(self, **options):
    """
    Builds the <orb.ContextOptions> for this record set, merging any
    keyword overrides into a copy of the stored options.

    :return <orb.ContextOptions>
    """
    merged = orb.ContextOptions(options=self._contextOptions)
    merged.update(options)
    return merged
def difference(self, records):
    """
    Returns a new record set containing this set's records that are NOT
    part of the inputted records, query, or list.

    :param records | <RecordSet> || <orb.Query> || <list> || <tuple>

    :return <orb.RecordSet>

    :raises TypeError for unsupported input types
    """
    out = RecordSet(self)
    if isinstance(records, RecordSet):
        out._lookupOptions.where = records.lookupOptions().where.negated() & out._lookupOptions.where
    elif orb.Query.typecheck(records):
        out._lookupOptions.where = records.negated() & out._lookupOptions.where
    elif type(records) in (list, tuple):
        # exclude the inputted records (the original concatenated them,
        # which produced a union rather than a difference)
        out._cache['records'][None] = [r for r in self.records() if r not in records]
    else:
        raise TypeError(records)
    return out
# noinspection PyProtectedMember
def duplicate(self, other):
    """
    Copies all of the configuration and cached state from the inputted
    record set onto this instance.

    :param other | <RecordSet>
    """
    # cache / grouping state
    self._grouped = other._grouped
    self._cache.update({section: data.copy() for section, data in other._cache.items()})

    # in-place sorting state
    self._sort_cmp_callable = other._sort_cmp_callable
    self._sort_key_callable = other._sort_key_callable
    self._sort_reversed = other._sort_reversed

    # selection state
    self._database = other._database
    self._groupBy = list(other._groupBy) if other._groupBy else None
    self._contextOptions = other.contextOptions().copy()
    self._lookupOptions = other.lookupOptions().copy()
    self._table = other._table
def distinct(self, columns, **options):
    """
    Returns a distinct series of column values for the current record set
    based on the inputted column list.

    The additional options are any keyword arguments supported by the
    <orb.LookupOptions> or <orb.ContextOptions> classes.

    If there is one column supplied, then the result will be a list,
    otherwise, the result will be a dictionary.

    :param columns | <str> || [<str>, ..]
           **options

    :return [<variant>, ..] || {<orb.Column> column: [<variant>, ..]}
    """
    # ensure we have a database and table class
    if self.isNull():
        return {} if len(columns) > 1 else []

    table = self.table()
    schema = table.schema()

    # ensure we have a list of column instances
    if not type(columns) in (list, tuple):
        columns = [schema.column(columns)]
    else:
        columns = [schema.column(col) for col in columns]

    db = self.database()

    # return information from the database
    cache = table.recordCache()
    schema = table.schema()
    results = {}

    # reference columns are resolved by selecting the referenced model
    # directly so that inflated records can be returned
    if options.get('inflated', True):
        for column in columns:
            if column.isReference():
                ref_model = column.referenceModel()

                lookup = self.lookupOptions(**options)
                lookup.columns = [column]
                lookup.distinct = True
                lookup.order = None

                rset = orb.RecordSet(self)
                rset.setLookupOptions(lookup)
                rset.setContextOptions(self.contextOptions(**options))

                results[column] = ref_model.select(where=orb.Query(ref_model).in_(rset))

        lookup_columns = list(set(columns) - set(results.keys()))
    else:
        lookup_columns = list(columns)

    # perform an actual lookup for the remaining plain columns
    if lookup_columns:
        ctxt_opts = self.contextOptions(**options)
        lookup = self.lookupOptions(**options)
        lookup.columns = lookup_columns

        backend = db.backend()
        if cache is not None and orb.system.isCachingEnabled():
            output = cache.distinct(backend, table, lookup, ctxt_opts)
        else:
            output = backend.distinct(table, lookup, ctxt_opts)
    else:
        output = {}

    results.update({schema.column(k): v for k, v in output.items()})
    return results.get(columns[0], []) if len(columns) == 1 else results
def first(self, **options):
    """
    Returns the first record that matches the current query, or None
    when there is no match.  Results are cached per option set.

    :return <orb.Table> || None
    """
    table = self.table()
    db = self.database()
    key = self.cacheKey(options)

    try:
        out = self._cache['first'][key]
        # the cached value may be None (no match) or a raw values dict,
        # neither of which supports updateOptions -- guard the call the
        # same way last() does (the original crashed on a cached None)
        try:
            updater = out.updateOptions
        except AttributeError:
            pass
        else:
            updater(**options)
        return out
    except KeyError:
        # a loaded record cache for the same key answers directly
        try:
            return self._cache['records'][key][0]
        except IndexError:
            return None
        except KeyError:
            options['limit'] = 1
            lookup = self.lookupOptions(**options)
            ctxt_opts = self.contextOptions(**options)

            if self.isNull():
                return None

            # default to a stable primary-key ordering
            lookup.order = lookup.order or [(table.schema().primaryColumns()[0].name(), 'asc')]

            # retrieve the data from the cache when enabled
            cache = table.recordCache()
            if cache is not None and orb.system.isCachingEnabled():
                records = cache.select(db.backend(), table, lookup, ctxt_opts)
            else:
                records = db.backend().select(table, lookup, ctxt_opts)

            if records:
                if ctxt_opts.inflated and lookup.returning != 'values':
                    record = self.inflateRecord(table, records[0], **options)
                    record.setLookupOptions(lookup)
                    record.setContextOptions(ctxt_opts)
                else:
                    record = records[0]
            else:
                record = None

            self._cache['first'][key] = record
            return record
def filter(self, **options):
    """
    Refines this record set with a query built from the keyword options.
    Equivalent to `self.refine(Q.build(options))`; with no options the
    set itself is returned unchanged.

    :param **options | key/value pairing of options to filter by

    :return <orb.RecordSet>
    """
    if options:
        return self.refine(orb.Query.build(options))
    return self
def groupBy(self):
    """
    Returns the grouping columns assigned to this record set.

    :return [<str>, ..] || None
    """
    return self._groupBy
def grouped(self, grouping=None, **options):
    """
    Returns the records in a particular grouping. If the grouping option
    is left as None, then the base grouping for this instance will be used.

    :param grouping | <str> columnName || [<str> columnName, ..] || None
           **options

    :return { <variant> grouping: <orb.RecordSet>, .. }
    """
    if grouping is None:
        grouping = self.groupBy()
    if not type(grouping) in (list, tuple):
        grouping = [grouping]

    table = self.table()
    output = {}
    if grouping:
        grp_format = grouping[0]
        remain = grouping[1:]

        # look for advanced ('{column}...' formatted) grouping
        if '{' in grp_format:
            formatted = True
            columns = list(set(re.findall('\{(\w+)', grp_format)))
        else:
            formatted = False
            columns = [grp_format]

        values = self.distinct(columns, **options)
        for value in values:
            # per-value column mapping.  (the original assigned this
            # dict to `options`, clobbering the method kwargs for every
            # iteration after the first)
            value_map = {}

            # generate the sub-grouping query
            if len(columns) > 1:
                sub_query = orb.Query()
                for i, column in enumerate(columns):
                    sub_query &= orb.Query(column) == value[i]
                    value_map[column] = value[i]
            else:
                sub_query = orb.Query(columns[0]) == value
                value_map[columns[0]] = value

            # define the grouping key
            if formatted:
                key = grp_format.format(**value_map)
            else:
                key = value

            # assign or merge the output for the grouping
            if key in output:
                sub_set = output[key]
                sub_set.setQuery(sub_set.query() | sub_query)
            else:
                sub_set = RecordSet(table, None)
                lookup = self.lookupOptions(**options)
                db_options = self.contextOptions(**options)

                # join the lookup options
                if lookup.where is None:
                    lookup.where = sub_query
                else:
                    lookup.where &= sub_query

                sub_set.setLookupOptions(lookup)
                sub_set.setContextOptions(db_options)

                if remain:
                    sub_set = sub_set.grouped(remain, **options)

                output[key] = sub_set

    return output
def hasRecord(self, record, **options):
    """
    Returns whether or not the inputted record (or raw id / values dict)
    is contained in this record set.

    :param record | <orb.Table> || <dict> || <variant> id

    :return <bool>
    """
    try:
        record_id = record.id()
        # records of a different table are never members
        if type(record) != self.table():
            return False
    except AttributeError:
        try:
            record_id = record['id']
        except KeyError:
            record_id = record
    return record_id in self.ids()
def ids(self, **options):
    """
    Returns a list of the ids that are associated with this record set.

    :sa primaryKeys

    :return [<variant>, ..]
    """
    # use the pre-cached id list when available (entries may be either
    # inflated records or raw id values)
    try:
        return [record.id() if isinstance(record, orb.Table) else record for record in self._cache['ids'][None]]
    except KeyError:
        pass

    # otherwise look up the primary key values from the backend
    if self.table():
        cols = self.table().schema().primaryColumns()
        cols = [col.fieldName() for col in cols]
        return self.values(cols, **options)

    return self.values(orb.system.settings().primaryField(), **options)
def inflateRecord(self, table, record, **options):
    """
    Inflates a raw database row into a table instance, applying this
    record set's lookup and context options to the new instance.

    :param table  | <subclass of orb.Table>
           record | <dict>

    :return <orb.Table>
    """
    instance = table.inflateRecord(record, options=self.contextOptions(**options))
    instance.setLookupOptions(self.lookupOptions(**options))
    return instance
def index(self, record):
    """
    Returns the index of the inputted record within this set, checking
    the fully-loaded cache first and falling back to an id lookup.

    :param record | <orb.Table> || None

    :return <int> (-1 for a null record)
    """
    if not record:
        return -1
    try:
        return self._cache['records'][None].index(record)
    except KeyError:
        return self.ids().index(record.id())
def indexed(self, columns=None, **options):
    """
    Returns the records in a particular grouping. If the columns option
    is left as None, then the records are keyed by their primary key.

    :param columns | <str> columnName || [<str> columnName, ..] || None

    :return { <variant> grouping: <orb.Table>, .. }
    """
    # default: index by primary key
    if columns is None:
        return dict([(x.primaryKey(), x) for x in self.records(**options)])

    if not type(columns) in (list, tuple):
        columns = [columns]

    table = self.table()  # NOTE(review): unused local
    output = {}
    # whether the index key values themselves should be inflated
    indexInflated = options.pop('indexInflated', True)
    if columns:
        for record in self.records(**options):
            key = []
            for column in columns:
                key.append(record.recordValue(column, inflated=indexInflated))

            # single-column keys stay scalar; multi-column become tuples
            if len(key) == 1:
                output[key[0]] = record
            else:
                output[tuple(key)] = record
    return output
def insertInto(self, db, **options):
    """
    Inserts the records from this set into another database, allowing a
    set to be duplicated from one database to another.

    :param db | <orb.Database>

    :return <bool> | False when the target is this set's own database
    """
    if self.database() == db:
        return False

    lookup = orb.LookupOptions(**options)
    ctxt_opts = orb.ContextOptions(**options)
    ctxt_opts.force = True

    db.backend().insert(self.records(), lookup, ctxt_opts)
    return True
def isEmpty(self, **options):
    """
    Returns whether or not this record set contains any records.

    :return <bool>
    """
    if self.isNull():
        return True

    # a fully loaded cache answers immediately
    try:
        return len(self._cache['records'][None]) == 0
    except KeyError:
        pass

    # better to assume that we're not empty on slower connections
    if orb.system.settings().optimizeDefaultEmpty():
        return False

    key = self.cacheKey(options)
    try:
        return self._cache['empty'][key]
    except KeyError:
        try:
            return len(self._cache['records'][key]) == 0
        except KeyError:
            # define custom options for the cheapest possible lookup
            options['columns'] = ['id']
            options['limit'] = 1
            options['inflated'] = False

            is_empty = self.first(**options) is None
            self._cache['empty'][key] = is_empty
            return is_empty
def isGrouped(self):
    """
    Returns whether or not this record set is intended to be grouped.
    This only records intent -- it does not force grouping.

    :return <bool>
    """
    return self._grouped
def isInflated(self):
    """
    Returns whether or not results from this record set will be inflated
    into table instances.

    :return <bool>
    """
    if self._lookupOptions.returning == 'values':
        return False
    return self._contextOptions.inflated
def isLoaded(self):
    """
    Returns whether or not any record lookups have been cached on this
    set yet.

    :return <bool>
    """
    return len(self._cache['records']) != 0
def isOrdered(self):
    """
    Returns whether or not an ordering has been assigned to this record
    set.  This only records intent -- it does not force ordering.

    :return <bool>
    """
    return self._lookupOptions.order is not None
def isNull(self):
    """
    Returns whether or not this record set lacks the table, database,
    or backend needed to hold valid data.

    :return <bool>
    """
    db = self.database()
    if not (self.table() and db):
        return True
    return not db.backend()
def isThreadEnabled(self):
    """
    Returns whether or not this record set's database backend supports
    threading.

    :return <bool>
    """
    db = self.database()
    return db.isThreadEnabled() if db else False
def iterrecords(self, batch=1):
    """
    Returns an iterator that loads this record set's contents in batches
    of the given size, reducing per-call load on the database.

    :param batch | <int>

    :return <orb.data.recordset.RecordSetIterator>
    """
    return RecordSetIterator(self, batch)
def update(self, **values):
    """
    Bulk editing of a record set is not supported.

    :param **values | <dict>

    :raises errors.ActionNotAllowed always
    """
    raise errors.ActionNotAllowed('Bulk editing of records is not allowed.')
def updateOptions(self, **options):
    """
    Merges the inputted keyword options into both the lookup and the
    context options stored on this record set.

    :param **options
    """
    self._lookupOptions.update(options)
    self._contextOptions.update(options)
def union(self, records):
    """
    Returns a new record set joining this set with the inputted records,
    query, or list.

    :param records | <RecordSet> || <orb.Query> || <list> || <tuple>

    :return <orb.RecordSet>

    :raises TypeError for unsupported input types
    """
    out = RecordSet(self)
    lookup = out._lookupOptions
    if isinstance(records, RecordSet):
        lookup.where = records.lookupOptions().where | lookup.where
    elif orb.Query.typecheck(records):
        lookup.where = records | lookup.where
    elif type(records) in (list, tuple):
        out._cache['records'][None] = self.records() + records
    else:
        raise TypeError(records)
    return out
def json(self, **options):
    """
    Returns the records for this set as a json string (or a primitive
    structure when the context format is not 'text').

    :return <str> || <dict> || <list> || <int>
    """
    lookup = self.lookupOptions(**options)
    context = self.contextOptions(**options)

    # lookup specific return types
    if lookup.returning == 'count':
        return len(self)
    elif lookup.returning == 'first':
        return self.first().json()
    elif lookup.returning == 'last':
        return self.last().json()

    tree = lookup.expandtree()
    output = {}

    # each requested expansion is popped from the tree and rendered
    if 'first' in tree:
        options['expand'] = tree.pop('first')
        output['first'] = self.first(**options)
    if 'last' in tree:
        options['expand'] = tree.pop('last')
        output['last'] = self.last(**options)
    if 'ids' in tree:
        tree.pop('ids')
        output['ids'] = self.ids(**options)
    if 'count' in tree:
        tree.pop('count')
        output['count'] = self.count(**options)
    if 'records' in tree:
        # merge remaining expansions into the record sub-tree
        sub_tree = tree.pop('records', tree)
        sub_tree.update(tree)
        options['expand'] = sub_tree
        if lookup.returning != 'values':
            output['records'] = [record.json(lookup=lookup, options=context) for record in self.records()]
        else:
            output['records'] = self.records()

    # no expansions requested: return the plain record list
    if not output:
        if lookup.returning != 'values':
            output = [record.json(lookup=lookup, options=context) for record in self.records()]
        else:
            output = self.records()

    if context.format == 'text':
        return projex.rest.jsonify(output)
    else:
        return output
def last(self, **options):
    """
    Returns the last record for this set by inverting the order of the lookup. If
    no order has been defined, then the primary column will be used as the ordering.

    :return <orb.Table> || None
    """
    key = self.cacheKey(options)

    try:
        out = self._cache['last'][key]
        # the cached value may be None or a raw values dict, neither of
        # which supports updateOptions
        try:
            updater = out.updateOptions
        except AttributeError:
            pass
        else:
            updater(**options)
        return out
    except KeyError:
        try:
            return self._cache['records'][key][-1]
        except KeyError:
            if self.isNull():
                return None

            # the last record is the first record of the reversed order
            record = self.reversed().first(**options)
            self._cache['last'][key] = record
            return record
def limit(self):
    """
    Returns the maximum number of records this set will look up.

    :return <int>
    """
    return self._lookupOptions.limit
def lookupOptions(self, **options):
    """
    Builds the <orb.LookupOptions> for this record set, merging any
    keyword overrides into a copy of the stored options.

    :return <orb.LookupOptions>
    """
    merged = orb.LookupOptions(lookup=self._lookupOptions)
    merged.update(options)
    return merged
def namespace(self):
    """
    Returns the namespace assigned to this record set's queries.

    :return <str> || None
    """
    return self._contextOptions.namespace
def order(self):
    """
    Returns the ordering assigned to this record set.

    :return [(<str> field, <str> asc|desc), ..] || None
    """
    return self._lookupOptions.order
def ordered(self, *order):
    """
    Returns a copy of this record set sorted by the inputted ordering.

    :param order | [(<str> column, <str> asc | desc), ..]

    :return <orb.RecordSet>
    """
    copy = RecordSet(self)
    copy.setOrder(order)
    return copy
def pageCount(self, pageSize=None):
    """
    Returns the number of pages that this record set contains. If no page
    size is specified, then the page size for this instance is used.

    :sa setPageSize

    :param pageSize | <int> || None

    :return <int>
    """
    if pageSize is None:
        pageSize = self.pageSize()
    else:
        pageSize = max(0, pageSize)

    # if there is no page size, then there is only 1 page of data
    if not pageSize:
        return 1

    # integer ceiling division -- same result as the original
    # totalCount / float(pageSize) computation without the float
    # precision risk on large counts
    return -(-self.totalCount() // pageSize)
def page(self, pageno, pageSize=None):
    """
    Returns a record set restricted to the given page number.  When no
    page size is supplied this set's own page size is used; a page size
    of zero yields an unrestricted copy.

    :param pageno   | <int>
           pageSize | <int> || None

    :return <orb.RecordSet>
    """
    size = self.pageSize() if pageSize is None else max(0, pageSize)

    # for only 1 page of information, return all information
    if not size:
        return RecordSet(self)

    # returns a new record set with this start and limit information
    output = RecordSet(self)
    output.setCurrentPage(pageno)
    output.setPageSize(size)
    output.setStart(size * (pageno - 1))
    output.setLimit(size)
    return output
def paged(self, pageSize=None):
    """
    Breaks this record set up into a list of page-sized record sets.

    :param pageSize | <int> || None

    :return [<orb.RecordSet>, ..]
    """
    size = self.pageSize() if pageSize is None else max(0, pageSize)
    if not size or self.isEmpty():
        return []

    pages = []
    for index in range(self.pageCount(size)):
        chunk = RecordSet(self)
        chunk.setStart(index * size)
        chunk.setLimit(size)
        pages.append(chunk)
    return pages
def pages(self, pageSize=None):
    """
    Returns the sequence of page numbers available in this record set.

    :return [<int>, ..]
    """
    return range(1, self.pageCount(pageSize) + 1)
def pageSize(self):
    """
    Returns the page size for this record set, falling back on the
    system default when none has been assigned.

    :return <int>
    """
    return self._lookupOptions.pageSize or orb.system.settings().defaultPageSize()
def primaryKeys(self, **options):
    """
    Returns the primary keys for the records in this set (alias of ids).

    :return [<variant>, ..]
    """
    return self.ids(**options)
def query(self):
    """
    Returns the where query assigned to this record set.

    :return <Query> || <QueryCompound> || None
    """
    return self._lookupOptions.where
def recordAt(self, index, **options):
    """
    Returns the record at the given index and current query information.
    Additional options can include any options available for the
    <orb.LookupOptions> or <orb.ContextOptions> classes that will be passed
    to this recordset backend.

    :param index | <int>
           default | <variant> returned instead of raising IndexError

    :return <orb.Table>
    """
    has_default = 'default' in options
    default = options.pop('default', None)
    key = self.cacheKey(options)

    try:
        return self._cache['records'][key][index]
    except KeyError:
        if self.isNull():
            if has_default:
                return default
            else:
                raise IndexError(index)

        # NOTE(review): this fallback returns first() regardless of the
        # requested index (the index is never translated into a start
        # offset) -- confirm whether this is intended
        record = self.first(**options)
        if record is not None:
            return record
        elif has_default:
            return default
        else:
            raise IndexError(index)
def refine(self, *args, **options):
    """
    Creates a subset of this record set with a joined query based on the
    inputted options.  Any 'terms' option triggers a search over the
    refined set.

    :return <RecordSet>
    """
    out = RecordSet(self)

    # backward compatibility: refine(query) positional form
    if args and orb.Query.typecheck(args[0]):
        options.setdefault('where', args[0])

    out.setLookupOptions(self.lookupOptions(**options))
    out.setContextOptions(self.contextOptions(**options))

    terms = options.get('terms')
    if terms:
        return out.search(terms)
    return out
def remove(self, **options):
    """
    Removes the records from this set based on the inputted removal mode.

    :note As of version 0.6.0 on, this method accepts variable
          keyword arguments. This is to support legacy code,
          the preferred method to call is to pass in options =
          <orb.ContextOptions>, however, you can supply any
          members as keyword arguments and it will generate an
          options instance for you.

    :return <int>
    """
    if self.isNull() or self.isEmpty():
        return 0

    try:
        backend = self.database().backend()
    except AttributeError:
        return 0

    # remove per-table, including cascading records
    breakdown = self._schemaBreakdown()
    dbopts = self.contextOptions(**options)

    count = 0
    for table, query in breakdown.items():
        lookup = orb.LookupOptions(where=query)
        count += backend.remove(table, lookup, dbopts)
        # removed rows invalidate any cached lookups for the table
        table.markTableCacheExpired()
    return count
def reversed(self, **options):
    """
    Returns a recordset with the reversed order from this set.

    :return <orb.RecordSet>
    """
    rset = RecordSet(self)
    # NOTE(review): the fallback ordering uses the primary column
    # *object* where first() uses its .name() -- confirm both forms are
    # accepted downstream
    order = options.get('order') or self.order() or [(self.table().schema().primaryColumns()[0], 'asc')]
    # flip each direction to invert the ordering
    order = [(col, 'asc' if direct == 'desc' else 'desc') for col, direct in order]
    rset.setOrder(order)
    return rset
def search(self,
           search_terms,
           mode=SearchMode.All,
           limit=None,
           useThesaurus=True):
    """
    Creates a subset of this record set with a joined query based on the
    inputted search text. The search will be applied to all columns that are
    marked as searchable.

    :sa Column.setSearchable

    :param search_terms | [<str>, ..] || <str>

    :return <RecordSet>
    """
    if not self.table():
        return RecordSet()
    if not search_terms:
        return RecordSet(self)

    # delegate query construction to the schema's search engine
    # NOTE(review): `mode` and `useThesaurus` are accepted but not used
    # in this implementation -- confirm whether the engine should
    # receive them
    engine = self.table().schema().searchEngine()
    output = self.refine(engine.parseQuery(self.table(), search_terms))
    if limit is not None:
        output.setLimit(limit)
    return output
def setColumns(self, columns):
    """
    Sets the columns that this record set will query the database for.

    :param columns | [<str>, ..] || None
    """
    self._lookupOptions.columns = columns
def setCurrentPage(self, pageno):
    """
    Sets the page number this record set currently represents.

    :param pageno | <int>
    """
    self._lookupOptions.page = pageno
def setDatabase(self, database):
    """
    Sets the database instance this record set will query against.

    NOTE(review): database() treats -1 (not None) as "unresolved", so
    assigning None here yields a None database rather than falling back
    to the table's default -- confirm the documented behavior.

    :param database | <Database> || None
    """
    self._database = database
def setGrouped(self, state):
    """
    Sets whether or not this record set is intended to be grouped.  This
    records intent only -- it does not force grouping.

    :param state | <bool>
    """
    self._grouped = state
def setGroupBy(self, groupBy):
    """
    Sets the grouping columns for this record set.

    :param groupBy | [<str>, ..] || None
    """
    self._groupBy = groupBy
def setContextOptions(self, options):
    """
    Assigns a copy of the inputted context options to this record set.

    :param options | <orb.ContextOptions>
    """
    self._contextOptions = options.copy()
def setInflated(self, state):
    """
    Sets whether or not results from this record set should be inflated
    by default.

    :param state | <bool> || None
    """
    self._contextOptions.inflated = state
def setLimit(self, limit):
    """
    Sets the maximum number of records this set will look up.

    :param limit | <int>
    """
    self._lookupOptions.limit = limit
def setLookupOptions(self, lookup):
    """
    Assigns a copy of the inputted lookup options to this record set.

    :param lookup | <orb.LookupOptions>
    """
    self._lookupOptions = lookup.copy()
def setNamespace(self, namespace):
    """
    Sets the namespace used for this record set's queries.

    :param namespace | <str>
    """
    self._contextOptions.namespace = namespace
def setOrder(self, order):
    """
    Sets the field ordering that this record set will use.

    :param order | [(<str> field, <str> asc|desc), ..] || None
    """
    self._lookupOptions.order = order
def setPageSize(self, pageSize):
    """
    Sets the page size used when paging through this record set.

    :param pageSize | <int>
    """
    self._lookupOptions.pageSize = pageSize
def setQuery(self, query):
    """
    Assigns the where query for this record set and flushes the cached
    results, since they no longer reflect the lookup.

    :param query | <Query> || <QueryCompound> || None
    """
    self._lookupOptions.where = query
    self.clear()
def setValues(self, **values):
    """
    Applies the inputted column/value pairs to every record currently in
    this record set.
    """
    pairs = values.items()
    for record in self.records():
        for column, value in pairs:
            record.setRecordValue(column, value)
def setSourceColumn(self, column):
    """
    Sets the column that generated this recordset in a reverse lookup.
    Used internally by the reverse-lookup machinery.

    :param column | <str>
    """
    self._sourceColumn = column
def setSource(self, source):
    """
    Sets the record that generated this recordset in a reverse lookup.
    Used internally by the reverse-lookup machinery.

    :param source | <orb.Table> || None
    """
    self._source = source
def setStart(self, index):
    """
    Sets the start offset for this record set's lookup.

    :param index | <int>
    """
    self._lookupOptions.start = index
def sumOf(self, columnName):
    """
    Returns the sum of this record set's values for the given column.

    :param columnName | <str>

    :return <int>
    """
    return sum(self.values(columnName))
def sourceColumn(self):
    """
    Returns the column from the source record that generated this
    recordset in a reverse lookup.

    :return <str> || None
    """
    return self._sourceColumn
def source(self):
    """
    Returns the record that generated this set when built via a
    reverse lookup.

    :return <orb.Table> || None
    """
    return self._source
# noinspection PyShadowingBuiltins
def sort(self, cmp=None, key=None, reverse=False):
    """
    Stores the comparison, key and reverse options used when ordering the
    resulting records.

    :param cmp | <callable> || None
           key | <callable> || None
           reverse | <bool>
    """
    self._sort_cmp_callable = cmp
    self._sort_key_callable = key
    self._sort_reversed = reverse
def start(self):
    """
    Returns the start index used when selecting this record set.

    :return <int>
    """
    return self._lookupOptions.start
def table(self):
    """
    Returns the table class associated with this record set.

    :return <subclass of orb.Table>
    """
    return self._table
def totalCount(self):
    """
    Returns the total number of records in the result set, ignoring any
    paging (start/limit/page size) that count() would normally apply.

    :return <int>
    """
    return self.count(start=0, limit=0, page=0, pageSize=0)
def toXml(self, xparent=None):
    """
    Serializes this recordset as XML, either under the given parent element
    or as a new root element.

    :param xparent | <ElementTree.Element> || None

    :return <ElementTree.Element>
    """
    if xparent is None:
        xml = ElementTree.Element('recordset')
    else:
        xml = ElementTree.SubElement(xparent, 'recordset')
    xml.set('table', self.table().schema().name())
    xlookup = ElementTree.SubElement(xml, 'lookup')
    xoptions = ElementTree.SubElement(xml, 'options')
    self._lookupOptions.toXml(xlookup)
    self._contextOptions.toXml(xoptions)
    return xml
def values(self, columns, **options):
    """
    Returns either a list of values for all the records if the inputted arg
    is a column name, or a dictionary of columnName values for multiple
    records for all the records in this set.

    :param columns | <str> || [<str>, ..]
           options | lookup/context option overrides

    :return [<variant>, ..] || {<str> column: [<variant>, ..], ..}
    """
    # nothing to collect for a null set or a set without a bound table
    if self.isNull() or self.table() is None:
        return []
    schema = self.table().schema()
    # normalize the input to a list of <orb.Column> instances
    if type(columns) not in (list, set, tuple):
        columns = [schema.column(columns)]
    else:
        columns = [schema.column(col) for col in columns]
    key = self.cacheKey(options)
    db = self.database()
    # create the lookup options (copy the list so later mutation of
    # `columns` does not leak into the lookup object)
    options['columns'] = columns[:]
    lookup = self.lookupOptions(**options)
    ctxt_opts = self.contextOptions(**options)
    # lookup the data: prefer the instance-level cache, then the table's
    # record cache, otherwise select straight from the backend
    cache = self.table().recordCache()
    if key in self._cache['records']:
        records = self._cache['records'][key]
    elif cache:
        records = cache.select(db.backend(), self.table(), lookup, ctxt_opts)
    else:
        records = db.backend().select(self.table(), lookup, ctxt_opts)
    # parse the values from the cache
    output = defaultdict(list)
    locale = ctxt_opts.locale
    # only inflate references when requested and raw values were not asked for
    inflated = ctxt_opts.inflated and lookup.returning != 'values'
    for record in records:
        for column in columns:
            expand = bool(column.isReference() and inflated)
            # retrieve the value
            if orb.Table.recordcheck(record) or orb.View.recordcheck(record):
                value = record.recordValue(column, inflated=expand)
            else:
                value = record.get(column.fieldName())
            # grab specific locale translation options
            if column.isTranslatable() and type(value) == dict:
                if locale != 'all':
                    value = value.get(locale, '')
            # expand a reference object if desired
            if expand and value is not None:
                ref_model = column.referenceModel()
                if not ref_model:
                    output[column].append(None)
                elif not isinstance(value, ref_model):
                    output[column].append(ref_model(value))
                else:
                    output[column].append(value)
            # de-expand an already loaded reference object if IDs are all that is wanted
            elif not expand and (orb.Table.recordcheck(value) or orb.View.recordcheck(value)):
                output[column].append(value.id())
            # return a standard item
            else:
                output[column].append(value)
    # single column -> flat list; multiple columns -> row tuples zipped
    # across the per-column lists
    if len(output) == 1:
        return output[columns[0]]
    elif output:
        return zip(*[output[column] for column in columns])
    else:
        return []
def view(self, name):
    """
    Returns the records in this set represented through the named view.

    :param name | <str>

    :return <orb.RecordSet> || None
    """
    tbl = self.table()
    if not tbl:
        return None
    view = tbl.schema().view(name)
    if not view:
        return None
    if self.isEmpty():
        return orb.RecordSet(view)
    return view.select(where=orb.Query(view).in_(self), options=self.contextOptions())
@staticmethod
def typecheck(value):
    """
    Returns whether or not the inputted value is a RecordSet instance.

    :param value | <variant>

    :return <bool>
    """
    return isinstance(value, RecordSet)
@staticmethod
def fromXml(xset):
    """
    Restores a recordset from its XML representation.

    :param xset | <ElementTree.Element> || None

    :return <orb.RecordSet>
    """
    table = orb.system.model(xset.get('table'))
    lookup = orb.LookupOptions.fromXml(xset.find('lookup'))
    context = orb.ContextOptions.fromXml(xset.find('options'))
    return table.select(lookup=lookup, options=context)
"""
Defines the DataStore class that will convert Column value types for different
backends to a base type.
"""
import time
import cPickle
import datetime
import decimal
import orb
import orb.errors
import projex.rest
from projex.addon import AddonManager
from projex.lazymodule import lazy_import
from projex.text import nativestring as nstr
from .converter import DataConverter
yaml = lazy_import('yaml')
pytz = lazy_import('pytz')
class DataStore(AddonManager):
    """
    Addon that converts Column values between their Python representation
    and the primitive form that a database backend can persist.
    """
    def restore(self, column, db_value):
        """
        Restores the inputted value from the database to a Python value.

        :param column | <orb.Column>
               db_value | <variant>

        :return <variant> | python value
        """
        if not column:
            return db_value

        col_type = orb.ColumnType.base(column.columnType())

        if db_value is None:
            return db_value
        elif col_type == orb.ColumnType.Pickle:
            try:
                return cPickle.loads(nstr(db_value))
            except StandardError:
                raise orb.errors.DataStoreError('Failed to restore pickle.')
        elif col_type == orb.ColumnType.Yaml:
            try:
                # bugfix: PyYAML has no `loads` function -- use safe_load to
                # parse the serialized text (also avoids arbitrary object
                # construction from stored data).
                return yaml.safe_load(nstr(db_value))
            except StandardError:
                raise orb.errors.DataStoreError('Failed to restore yaml.')
        elif col_type == orb.ColumnType.JSON:
            try:
                return projex.rest.unjsonify(db_value)
            except StandardError:
                raise orb.errors.DataStoreError('Failed to restore JSON.')
        elif col_type == orb.ColumnType.Query:
            if type(db_value) == dict:
                return orb.Query.fromDict(db_value)
            else:
                try:
                    return orb.Query.fromXmlString(nstr(db_value))
                except StandardError:
                    raise orb.errors.DataStoreError('Failed to restore query.')
        elif col_type in (orb.ColumnType.Timestamp, orb.ColumnType.Timestamp_UTC):
            if isinstance(db_value, (int, float)):
                return datetime.datetime.fromtimestamp(db_value)
            # bugfix: previously this branch fell through and returned None
            # for values already materialized as datetimes, silently
            # discarding data -- pass them through unchanged instead.
            return db_value
        elif col_type == orb.ColumnType.Dict:
            return projex.rest.unjsonify(nstr(db_value))
        elif column.isString():
            return projex.text.decoded(db_value)
        elif type(db_value) == decimal.Decimal:
            return float(db_value)
        else:
            return db_value

    def fromString(self, value_str):
        """
        Converts the inputted string to a standard Python value.

        :param value_str | <str>

        :return <variant>
        """
        # SECURITY(review): eval executes arbitrary code -- this must never
        # receive untrusted input; consider ast.literal_eval instead.
        try:
            return eval(value_str)
        except StandardError:
            return value_str

    def store(self, column, py_value):
        """
        Prepares the inputted value from Python to a value that the database
        can store.

        :param column | <orb.Column>
               py_value | <variant>

        :return <variant>
        """
        col_type = orb.ColumnType.base(column.columnType())
        py_value = DataConverter.toPython(py_value)

        if py_value is None:
            return None
        # save a query
        elif col_type == orb.ColumnType.Query:
            if type(py_value) == dict:
                py_value = orb.Query.fromDict(py_value)
            if orb.Query.typecheck(py_value):
                try:
                    return py_value.toXmlString()
                except StandardError:
                    raise orb.errors.DataStoreError('Unable to convert Query to XML')
        # save a pickle
        elif col_type == orb.ColumnType.Pickle:
            return cPickle.dumps(py_value)
        # save a yaml
        elif col_type == orb.ColumnType.Yaml:
            try:
                # bugfix: PyYAML has no `dumps` function -- yaml.dump returns
                # the serialized string when no stream is given.
                return yaml.dump(py_value)
            except ImportError:
                raise orb.errors.DependencyNotFound('PyYaml')
            except StandardError:
                raise orb.errors.DataStoreError('Unable to convert to yaml')
        # save as JSON
        elif col_type == orb.ColumnType.JSON:
            try:
                return projex.rest.jsonify(py_value)
            except StandardError:
                raise orb.errors.DataStoreError('Unable to convert to JSON')
        # save a record set
        elif orb.RecordSet.typecheck(py_value):
            return py_value.primaryKeys()
        # save a record
        elif orb.Table.recordcheck(py_value) or orb.View.recordcheck(py_value):
            return py_value.primaryKey() if py_value.isRecord() else None
        # save a list/tuple/set (recursively convert each element)
        elif type(py_value) in (list, tuple, set):
            return tuple([self.store(column, x) for x in py_value])
        # save a timedelta (stored as an offset from "now")
        elif type(py_value) == datetime.timedelta:
            now = datetime.datetime.now()
            dtime = now + py_value
            return self.store(column, dtime)
        # save a datetime
        elif type(py_value) == datetime.datetime:
            if col_type in (orb.ColumnType.Timestamp, orb.ColumnType.Timestamp_UTC):
                return time.mktime(py_value.timetuple())
            # convert timezone information to UTC data
            if py_value.tzinfo is not None:
                try:
                    return py_value.astimezone(pytz.utc).replace(tzinfo=None)
                except ImportError:
                    raise orb.errors.DependencyNotFound('pytz')
            return py_value
        # save a dictionary
        elif type(py_value) == dict:
            return projex.rest.jsonify(py_value)
        # save a string
        elif type(py_value) in (str, unicode):
            return projex.text.decoded(py_value)
        # save a basic python value
        else:
            return py_value

    def toString(self, value):
        """
        Converts the inputted value to a string representation.

        :param value | <variant>

        :return <str>
        """
        return nstr(value)
""" Defines an overall management class for all environments, databases,
and schemas. """
import datetime
import glob
import logging
import os
import projex.security
import projex.text
from projex.lazymodule import lazy_import
from projex.text import nativestring as nstr
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
from .settings import Settings
log = logging.getLogger(__name__)
orb = lazy_import('orb')
pytz = lazy_import('pytz')
tzlocal = lazy_import('tzlocal')
class Manager(object):
    """
    Overall management class for all environments, databases and schemas
    registered with the orb system.
    """
    # singleton instance slot -- NOTE(review): populated outside this chunk;
    # confirm how the accessor assigns it.
    _instance = None
def __init__(self):
    """
    Initializes the manager with empty registries, default settings and the
    system search engine.
    """
    # system wide options
    self._environment = None  # current environment
    self._database = None  # current database
    self._tableclass = None  # base table class (default: orb.Table)
    self._namespace = ''  # current namespace
    self._token = None  # security token -- CREATE NEW TOKEN!!!!
    self._cache = None  # global cache manager
    self._searchEngine = orb.SearchEngine()
    # orb file loading/merging
    self._filename = ''  # current filename (*.orb file)
    self._referenceFiles = []  # referenced files (*.orb files)
    self._merging = False  # whether or not we're actively merging
    # settings instance
    self._settings = Settings()
    # i18n options
    self._locale = os.environ.get('ORB_LOCALE', 'en_US')  # may also be set to a callable later
    self._baseTimezone = None
    self._timezone = None
    # registry
    self._environments = set()
    self._databases = set()
    self._groups = set()
    self._schemas = set()
    self._properties = {}
    self._customEngines = {}
def asutc(self, dtime):
    """
    Converts the given datetime to naive UTC using the base (or system)
    timezone; returned unchanged if no timezone is defined.

    :param dtime | <datetime.datetime>

    :return <datetime.datetime>
    """
    tz = self.baseTimezone() or self.timezone()
    if tz is None:
        log.warning('No timezone is defined.')
        return dtime
    # ensure we have some timezone information before converting to UTC time
    if dtime.tzinfo is None:
        dtime = tz.localize(dtime, is_dst=None)
    return dtime.astimezone(pytz.utc).replace(tzinfo=None)
def baseTableType(self):
    """
    Returns the base class all generated tables inherit from -- orb.Table
    unless a custom type was supplied via setBaseTableType.

    :return <subclass of Table>
    """
    return self._tableclass or orb.Table
def baseTimezone(self):
    """
    Returns the default timezone for this machine, resolving it lazily from
    the settings (or tzlocal) on first access and caching the result.

    :return <pytz.tzfile> || None
    """
    if self._baseTimezone is None:
        default = self.settings().defaultTimezone()
        if default:
            try:
                # bugfix: this previously assigned self._timezone, so the
                # resolved zone was never cached here and None was returned
                self._baseTimezone = pytz.timezone(default)
            except ImportError:
                log.error('pytz must be installed for timezone support.')
        else:
            try:
                self._baseTimezone = tzlocal.get_localzone()
            except ImportError:
                log.error('tzlocal must be installed for local zone support.')
    return self._baseTimezone
def cache(self):
    """
    Returns the global cache store, creating the default 'Basic' data cache
    on first access.

    :return <orb.caching.DataCache> || None
    """
    if self._cache is None:
        self._cache = orb.DataCache.byName('Basic')()
    return self._cache
def clear(self):
    """
    Clears out all current data from this orb instance, disconnecting any
    active databases and clearing all environments first.
    """
    self._environment = None
    self._database = None
    # close any active connections
    for connection in self._databases:
        connection.disconnect()
    # close any active environments
    for environ in self._environments:
        environ.clear()
    self._filename = ''
    self._referenceFiles = []
    self._groups.clear()
    self._environments.clear()
    self._databases.clear()
    self._schemas.clear()
    self._properties.clear()
def customEngines(self, databaseType):
"""
Returns a list of the custom engines for the database type.
:param databaseType | <str>
:return [(<ColumnType> typ, <subclass of CommandEngine> eng), ..]
"""
try:
return self._customEngines[databaseType].items()
except (KeyError, AttributeError):
return []
def database(self, name=None, environment=None):
    """
    Returns the database for this manager based on the inputted name.  With
    no name the currently active database is returned.  When an environment
    is specified the lookup happens there first, otherwise in the active
    environment, before falling back to the globally registered databases.

    :usage |>>> import orb
           |>>> orb.system.database() # returns active database
           |>>> orb.system.database('User') # returns the User db
           |>>> orb.system.database('User', 'Debug') # from Debug

    :param name | <str> || None
           environment | <str> || <orb.environment.Environment> || None

    :return <orb.database.Database> || None
    """
    # if no name is given, then return the current database
    if not name:
        return self._database
    # resolve the environment to search in
    env = environment
    if env is None:
        env = self._environment
    elif not isinstance(env, orb.Environment):
        env = self.environment(env)
    found = env.database(name) if env else None
    if found:
        return found
    # fall back to the globally registered databases
    for candidate in self._databases:
        if candidate.name() == name:
            return candidate
    return None
def databases(self, recursive=False):
    """
    Returns the databases registered with this system; with the recursive
    flag, databases defined by every environment are included too.

    :return [<orb.database.Database>, ..]
    """
    collected = list(self._databases)
    if recursive:
        for env in self.environments():
            collected += env.databases()
    return collected
def databaseSchemas(self, db, base=None):
    """
    Returns the schemas mapped to the inputted database, optionally limited
    to instances of the given base class.

    :param db | <orb.database.Database>

    :return [<orb.TableSchema>, ..]
    """
    matches = []
    for schema in self._schemas:
        if schema.database() == db and (base is None or isinstance(schema, base)):
            matches.append(schema)
    return matches
def decrypt(self, text):
    """
    Decrypts the inputted text using this manager's security token.

    :warning Set a custom token via setToken before relying on encryption.

    :param text | <str>

    :return <str>
    """
    return projex.security.decrypt(text, self.token())
def encrypt(self, text):
    """
    Encrypts the inputted text using this manager's security token.

    :warning Set a custom token via setToken before relying on encryption.

    :param text | <str>

    :return <str>
    """
    return projex.security.encrypt(text, self.token())
def environment(self, name=None):
    """
    Returns the environment matching the inputted name, or the currently
    active environment when no name is given.

    :param name | <str> || None

    :return <orb.environment.Environment> || None
    """
    if name is None:
        return self._environment
    return next((env for env in self._environments if env.name() == name), None)
def environments(self):
    """
    Returns all the environments used by this orb instance.

    :return [<orb.environment.Environment>, ..]
    """
    return list(self._environments)
def findRelatedColumns(self, schema):
    """
    Looks up every column in the system that references the inputted schema
    or any schema it inherits from.

    :param schema | <orb.tableschema.TableSchema>
    """
    target_names = [schema.name()] + schema.inheritsRecursive()
    found = []
    for other in self.schemas():
        for column in other.columns():
            if column in found:
                continue
            if column.reference() in target_names:
                found.append(column)
    return found
def findRelations(self, schema):
    """
    Looks up all (model, columns) pairs whose columns reference the
    inputted schema or any of its ancestors.

    :param schema | <orb.tableschema.TableSchema>
    """
    target_names = [schema.name()] + schema.inheritsRecursive()
    relations = []
    seen = []
    for other in self.schemas():
        referencing = []
        for column in other.columns():
            if column in seen:
                continue
            if column.reference() in target_names:
                referencing.append(column)
                seen.append(column)
        if referencing:
            relations.append((other.model(), referencing))
    return relations
def filename(self):
    """
    Returns the *.orb file path last recorded by the load and save methods.

    :return <str>
    """
    return self._filename
def group(self, name, autoAdd=False, database=None):
    """
    Returns the registered group matching the inputted name (and database,
    when given).

    :param name | <str>
           autoAdd | <bool>
           database | <str> || None

    :return <orb.TableGroup> || None
    """
    # NOTE(review): autoAdd is accepted but not acted on in this method.
    return next((grp for grp in self._groups
                 if grp.name() == name and
                 (database is None or grp.databaseName() == database)),
                None)
def groups(self, database=None):
    """
    Returns all registered groups, optionally filtered by database name.

    :return [<orb.TableGroup>, ..]
    """
    if database is None:
        return list(self._groups)
    return [grp for grp in self._groups if grp.databaseName() == database]
def inheritedModels(self, model):
    """
    Returns the models registered with the system that subclass the
    inputted model.

    :return [<orb.tableschema.Table>, ..]
    """
    found = []
    for schema in self.schemas():
        candidate = schema.model()
        if model == candidate:
            continue
        if candidate and issubclass(candidate, model):
            found.append(candidate)
    return found
def isCachingEnabled(self):
    """
    Returns whether or not global caching is enabled for the system.

    :return <bool>
    """
    return self.settings().isCachingEnabled()
def locale(self, options=None):
    """
    Returns the current system locale; supports a callable locale resolver
    that receives the context options.

    :param options | <orb.ContextOptions>

    :return <str>
    """
    if callable(self._locale):
        return self._locale(options)
    return self._locale
def load(self, filename='', includeReferences=False):
    """
    Loads this orb manager's settings from the given (or last known) xml
    orb file.

    :param filename | <str>

    :return <bool> | success
    """
    filename = filename or self.filename()
    if not (filename and os.path.exists(filename)):
        log.error('Invalid ORB file: %s' % filename)
        return False
    self.clear()
    if not self.merge(filename, includeReferences=includeReferences):
        return False
    self._filename = nstr(filename)
    return True
def loadModels(self,
               scope,
               groupName=None,
               autoGenerate=True,
               schemas=None,
               database=None):
    """
    Loads the models from the orb system into the inputted scope.

    :param scope | <dict>
           groupName | <str> || None
           autoGenerate | <bool>
           schemas | [<orb.TableSchema>, ..] || None
           database | <str> || None
    """
    # ensure we have a valid scope to load into
    if scope is None:
        return []
    # default to all known schemas for the database
    if schemas is None:
        schemas = self.schemas(database)
    loaded = []
    for tschema in schemas:
        # only load schemas from the requested group, when given
        if groupName is not None and tschema.groupName() != groupName:
            continue
        model = tschema.model(autoGenerate=autoGenerate)
        scope[model.__name__] = model
        loaded.append(tschema.name())
    return loaded
def maxCacheTimeout(self):
    """
    Returns the maximum cache timeout (in minutes) allowed for a cache.

    :return <int>
    """
    return self.settings().maxCacheTimeout()
def merge(self,
          filename_or_xml,
          includeReferences=False,
          referenced=False,
          dereference=False,
          database=None):
    """
    Merges the inputted ORB file to the schema.

    :param filename_or_xml | <str> || <xml.etree.ElementTree.Element>
           includeReferences | <bool>
           referenced | <bool> | flags the schemas as being referenced
           dereference | <bool> | forces the referenced flag back off
           database | <str> || None

    :return [<orb.TableSchema>, ..]
    """
    if dereference:
        referenced = False
    # accept either a filename or an already-parsed XML element
    if not isinstance(filename_or_xml, ElementTree.Element):
        filename = nstr(filename_or_xml)
        xorb = None
    else:
        filename = ''
        xorb = filename_or_xml
    output = []
    # load a directory of ORB files
    if filename and os.path.isdir(filename):
        for orbfile in glob.glob(os.path.join(filename, '*.orb')):
            output += self.merge(orbfile,
                                 includeReferences,
                                 referenced,
                                 dereference=dereference,
                                 database=database)
        return output
    # ensure the orb file exists
    if xorb is None and not os.path.exists(filename):
        return output
    # load a reference system
    if filename and referenced:
        # only load reference files once
        if filename in self._referenceFiles:
            return []
        self._referenceFiles.append(filename)
    if filename and xorb is None:
        try:
            xorb = ElementTree.parse(filename).getroot()
        except (ExpatError, ElementTree.ParseError):
            xorb = None
        # check for encrypted files
        if xorb is None:
            f = open(filename, 'r')
            data = f.read()
            f.close()
            # try decrypting the data
            decrypted = self.decrypt(data)
            try:
                xorb = ElementTree.fromstring(decrypted)
            except (ExpatError, ElementTree.ParseError):
                log.exception('Failed to load ORB file: %s' % filename)
                return []
    output = []
    # load references
    if includeReferences:
        xrefs = xorb.find('references')
        if xrefs is not None:
            for xref in xrefs:
                # NOTE(review): reference paths are joined onto the file path
                # itself rather than its directory -- saveAs computes relpath
                # the same way, so the two stay symmetric; confirm intended.
                ref_path = xref.get('path').replace('\\', '/')
                ref_path = os.path.join(filename, ref_path)
                ref_path = os.path.normpath(ref_path)
                ref_path = os.path.abspath(ref_path)
                output += self.merge(ref_path,
                                     includeReferences=True,
                                     referenced=True,
                                     dereference=dereference,
                                     database=database)
    # load properties
    xprops = xorb.find('properties')
    if xprops is not None:
        for xprop in xprops:
            self.setProperty(xprop.get('key'), xprop.get('value'))
    # load environments
    xenvs = xorb.find('environments')
    if xenvs is not None:
        for xenv in xenvs:
            env = orb.Environment.fromXml(xenv, referenced)
            self.registerEnvironment(env, env.isDefault())
    # load databases
    xdbs = xorb.find('databases')
    if xdbs is not None:
        for xdb in xdbs:
            db = orb.Database.fromXml(xdb, referenced)
            self.registerDatabase(db, db.isDefault())
    # load schemas
    xgroups = xorb.find('groups')
    if xgroups is not None:
        for xgroup in xgroups:
            grp, schemas = orb.TableGroup.fromXml(xgroup,
                                                  referenced,
                                                  database=database,
                                                  manager=self)
            if not grp:
                continue
            self.registerGroup(grp)
            for schema in schemas:
                self.registerSchema(schema)
            output += schemas
    return output
def model(self, name, autoGenerate=False, database=None):
    """
    Looks up a model class from the inputted schema name.

    :param name | <str>
           autoGenerate | <bool>

    :return <subclass of Table> || <orb.Table> || None
    """
    found = self.schema(name, database)
    # define a model off an existing schema
    if found:
        return found.model(autoGenerate=autoGenerate)
    # generate a blank table when requested
    if autoGenerate:
        log.warning('Could not find a schema for model: %s' % name)
        return orb.Table
    return None
def models(self, database=None):
    """
    Returns the models defined within this manager's scope.

    :return [<subclass of orb.Table>, ..]
    """
    generated = [schema.model() for schema in self.schemas(database)]
    # keep the original filter() result shape for existing callers
    return filter(lambda model: model is not None, generated)
def namespace(self):
    """
    Returns the namespace the system is currently operating in.

    :return <str>
    """
    return self._namespace
def now(self):
    """
    Returns the current time localized to the system timezone (converting
    from the base timezone when the two differ).

    :return <datetime.datetime>
    """
    moment = datetime.datetime.now()
    tz = self.timezone()
    base_tz = self.baseTimezone() or tz
    if tz is None or base_tz is None:
        log.warning('No local timezone defined.')
        return moment
    if base_tz == tz:
        return tz.localize(moment, is_dst=None)
    return base_tz.localize(moment, is_dst=None).astimezone(tz)
def property(self, propname, default=''):
    """
    Returns the stored property value for the given name, falling back on
    the supplied default when unset.

    :return <str>
    """
    return self._properties.get(propname, nstr(default))
def registerDatabase(self, database, active=True):
    """
    Registers a database with this manager, activating it when requested or
    when no database is active yet.

    :param database | <orb.database.Database>
           active | <bool>
    """
    self._databases.add(database)
    if active or not self._database:
        self._database = database
def registerEnvironment(self, environment, active=False):
    """
    Registers an environment with this manager, activating it when requested
    or when no environment is active yet.

    :param environment | <orb.environment.Environment>
           active | <bool>
    """
    self._environments.add(environment)
    # set the active environment
    if active or not self._environment:
        self.setEnvironment(environment)
def registerGroup(self, group, database=None):
    """
    Registers the inputted orb group with the system, assigning its order
    based on the current registry size.

    :param group | <orb.TableGroup>
    """
    if group in self._groups:
        return
    if database is not None:
        group.setDatabaseName(database)
    group.setOrder(len(self._groups))
    self._groups.add(group)
def registerSchema(self, schema, database=None):
    """
    Registers the inputted schema with the system, optionally binding it to
    a database name.

    :param schema | <orb.tableschema.TableSchema>
    """
    if database is not None:
        schema.setDatabaseName(database)
    self._schemas.add(schema)
def save(self, encrypted=False):
    """
    Saves the current orb structure out to the currently set filename.

    :param encrypted | <bool>

    :sa saveAs

    :return <bool>
    """
    return self.saveAs(self.filename(), encrypted=encrypted)
def saveAs(self, filename, encrypted=False):
    """
    Saves the current orb structure out to the inputted file.

    :param filename | <str>
           encrypted | <bool>

    :return <bool> | success
    """
    if not filename:
        return False
    filename = nstr(filename)
    xorb = ElementTree.Element('orb')
    xorb.set('version', orb.__version__)
    # save out references
    xrefs = ElementTree.SubElement(xorb, 'references')
    for ref_file in sorted(self._referenceFiles):
        # NOTE(review): relpath is computed against the file path itself,
        # not its directory; merge() resolves paths the same way, so the
        # two remain symmetric -- confirm intended.
        rel_path = os.path.relpath(ref_file, filename)
        xref = ElementTree.SubElement(xrefs, 'reference')
        xref.set('path', rel_path)
    # save out properties
    xprops = ElementTree.SubElement(xorb, 'properties')
    for key, value in sorted(self._properties.items()):
        xprop = ElementTree.SubElement(xprops, 'property')
        xprop.set('key', key)
        xprop.set('value', value)
    # save out the environments (skipping referenced ones)
    xenvs = ElementTree.SubElement(xorb, 'environments')
    for env in sorted(self.environments(), key=lambda x: x.name()):
        if not env.isReferenced():
            env.toXml(xenvs)
    # save out the global databases (skipping referenced ones)
    xdbs = ElementTree.SubElement(xorb, 'databases')
    for db in sorted(self.databases(), key=lambda x: x.name()):
        if not db.isReferenced():
            db.toXml(xdbs)
    # save out the groups (skipping referenced ones)
    xgroups = ElementTree.SubElement(xorb, 'groups')
    for grp in sorted(self.groups(), key=lambda x: x.name()):
        if grp.isReferenced():
            continue
        grp.toXml(xgroups)
    projex.text.xmlindent(xorb)
    data = ElementTree.tostring(xorb)
    # optionally encrypt the serialized document before writing
    if encrypted:
        data = self.encrypt(data)
    f = open(filename, 'w')
    f.write(data)
    f.close()
    return True
def schema(self, name, database=None):
    """
    Looks up the registered schema matching the inputted name (and database
    name, when given).

    :param name | <str>
           database | <str> || None

    :return <orb.tableschema.TableSchema> || None
    """
    return next((sch for sch in self._schemas
                 if sch.name() == name and
                 (database is None or sch.databaseName() == database)),
                None)
def schemas(self, database=None):
    """
    Returns all registered schemas, optionally filtered by database name.

    :return [<orb.tableschema.TableSchema>, ..]
    """
    if database is None:
        return list(self._schemas)
    return [sch for sch in self._schemas if sch.databaseName() == database]
def searchEngine(self):
    """
    Returns the search engine used by this system.

    :return <orb.SearchEngine>
    """
    return self._searchEngine
def searchThesaurus(self):
    """
    Returns the search thesaurus attached to this manager's search engine,
    which drives synonym expansion for user searches.

    :return <orb.SearchThesaurus>
    """
    return self._searchEngine.thesaurus()
def settings(self):
    """
    Returns the settings instance associated with this manager.

    :return <orb.Settings>
    """
    return self._settings
def setBaseTableType(self, tableType):
    """
    Assigns the base class that all generated tables will inherit from
    (orb.Table when None).

    :param tableType | <subclass of Table> || None
    """
    self._tableclass = tableType
def setBaseTimezone(self, timezone):
    """
    Assigns the timezone for this machine.

    :param timezone | <pytz.Timezone>
    """
    self._baseTimezone = timezone
def setCache(self, cache):
    """
    Assigns the global cache store for this manager.

    :param cache | <orb.caching.DataCache> || None
    """
    self._cache = cache
def setCachingEnabled(self, state):
    """
    Globally enables or disables caching via the settings object.

    :param state | <bool>
    """
    self.settings().setCachingEnabled(state)
def setCustomEngine(self, databaseType, columnType, engineClass):
    """
    Registers a custom command engine for the given database and
    column types.

    :param databaseType | <str>
           columnType | <orb.ColumnType>
           engineClass | <subclass of CommandEngine>
    """
    self._customEngines.setdefault(databaseType, {})[columnType] = engineClass
def setDatabase(self, database, environment=None):
    """
    Assigns the active database, resolving names through the given (or
    active) environment first.

    :param database | <str> || <orb.database.Database> || None
           environment | <str> || <orb.environment.Environment> || None
    """
    if not isinstance(database, orb.Database):
        database = self.database(database, environment)
    self._database = database
def setLocale(self, locale):
    """
    Assigns the locale the orb file will use; empty values are ignored.

    :param locale | <str>
    """
    if locale:
        self._locale = locale
def setEnvironment(self, environment):
    """
    Assigns the active environment (resolving names to instances) and
    switches to its default database.

    :param environment | <str> || <orb.environment.Environment> || None
    """
    if not isinstance(environment, orb.Environment):
        environment = self.environment(environment)
    self._environment = environment
    self.setDatabase(environment.defaultDatabase())
def setMaxCacheTimeout(self, minutes):
    """
    Assigns the maximum cache timeout (in minutes) allowed for a cache.

    :param minutes | <int>
    """
    self.settings().setMaxCacheTimeout(minutes)
def setModel(self, name, model, database=None):
    """
    Sets the model class for the inputted schema to the given model,
    rewiring any already-generated subclasses to inherit from it.

    :param name | <str>
           model | <subclass of Table>
           database | <str> || None

    :return <bool> | success
    """
    # NOTE(review): `name` is not used for the lookup (the schema comes from
    # the model itself) and `database` is computed but never read afterwards
    # -- confirm intended.
    if database is None:
        try:
            database = model.databaseName()
        except AttributeError:
            database = ''
    schema = model.schema()
    # replace out the old schema model with the new one
    # for any other classes that have already been generated
    old_model = schema.model()
    # define a method to lookup the base model
    def find_model(check, base):
        if not check:
            return None
        if base in check.__bases__:
            return check
        for base_model in check.__bases__:
            output = find_model(base_model, base)
            if output:
                return output
        return None
    # look for the old base model
    for other_model in self.models():
        if other_model == old_model:
            continue
        # replace the sub-class models
        if other_model and issubclass(other_model, old_model):
            # retrieve the root class that inherits from our old one
            other_model = find_model(other_model, old_model)
            # splice the new model into the subclass's bases, preserving
            # the original base ordering
            new_bases = list(other_model.__bases__)
            index = new_bases.index(old_model)
            new_bases.remove(old_model)
            new_bases.insert(index, model)
            other_model.__bases__ = tuple(new_bases)
    schema.setModel(model)
    return True
def setNamespace(self, namespace):
    """
    Assigns the namespace the system will operate in.

    :param namespace | <str>
    """
    self._namespace = namespace
def setProperty(self, prop, value):
    """
    Stores the custom property under the given key; both key and value are
    normalized to native strings.

    :param prop | <str>
           value | <str>
    """
    self._properties[nstr(prop)] = nstr(value)
def setSearchEngine(self, engine):
    """
    Assigns the search engine used by this system.

    :param engine | <orb.SearchEngine>
    """
    self._searchEngine = engine
def setSearchThesaurus(self, thesaurus):
    """
    Assigns the search thesaurus on this manager's search engine, expiring
    the global cache so stale synonym lookups are dropped.

    :param thesaurus | <orb.SearchThesaurus>
    """
    self._searchEngine.setThesaurus(thesaurus)
    if self._cache:
        self._cache.expire()
def setTimezone(self, timezone):
    """
    Sets the timezone for the system to the inputted zone. This will
    affect how the date time information for UTC data will be returned
    and formatted from the database.

    Timezone support requires the pytz package to be installed.

    :sa http://pytz.sourceforge.net/
        https://pypi.python.org/pypi/tzlocal

    :param timezone | <pytz.tzfile>
    """
    self._timezone = timezone
def setToken(self, token):
    """
    Assigns the security token used to encrypt/decrypt information for
    this database.

    :param token | <str>
    """
    self._token = token
def timezone(self, options=None):
    """
    Returns the timezone for the system. This will affect how the
    date time information will be returned and formatted from the
    database.

    Timezone support requires the pytz package to be installed. For
    auto-default timezone based on computer settings, you also need to
    make sure that the tzlocal package is installed.

    :sa http://pytz.sourceforge.net/
        https://pypi.python.org/pypi/tzlocal

    :param options | <orb.ContextOptions> || None

    :return <pytz.tzfile> || None
    """
    tz = self._timezone
    if tz is None:
        # no explicit zone assigned -- defer to the system default
        return self.baseTimezone()
    if callable(tz):
        # the zone may be a factory that resolves per-context
        return tz(options)
    return tz
def token(self):
    """
    Returns the security token used to encrypt/decrypt information for
    this database.

    :return <str>
    """
    return self._token
def unregisterDatabase(self, database):
    """
    Un-registers a particular database with this environment. The
    database is disconnected before removal so open connections are
    not leaked.

    Removing a database that is not registered is a no-op.

    :param database | <orb.database.Database>
    """
    database.disconnect()
    try:
        self._database.remove(database)
    except (KeyError, ValueError):
        # KeyError covers set-like containers, ValueError covers lists;
        # the original code only caught KeyError, so removing a missing
        # database from a list container crashed
        pass
def unregisterGroup(self, group):
    """
    Un-registers the inputted orb group from the system. Removing a
    group that is not registered is a no-op.

    :param group | <orb.TableGroup>
    """
    try:
        self._groups.remove(group)
    except (KeyError, ValueError):
        # ValueError is what list.remove raises; KeyError covers sets
        pass
def unregisterEnvironment(self, environment):
    """
    Un-registers a particular environment from this system. Removing an
    environment that is not registered is a no-op.

    :param environment | <orb.environment.Environment>
    """
    try:
        self._environments.remove(environment)
    except (KeyError, ValueError):
        # ValueError is what list.remove raises; KeyError covers sets
        pass
def unregisterSchema(self, schema):
    """
    Un-registers the inputted schema from the environment. Removing a
    schema that is not registered is a no-op.

    :param schema | <orb.tableschema.TableSchema>
    """
    try:
        self._schemas.remove(schema)
    except (KeyError, ValueError):
        # ValueError is what list.remove raises; KeyError covers sets
        pass
@staticmethod
def databaseTypes():
    """
    Lists the names of every Connection backend registered with the
    system.

    :return [<str>, ..]
    """
    return orb.Connection.addons().keys()
@staticmethod
def instance():
    """
    Returns the global singleton Manager, creating it on first access.

    :return <orb.Manager>
    """
    inst = Manager._instance
    if not inst:
        inst = Manager._instance = Manager()
    return inst
@staticmethod
def printHierarchy(obj):
    """
    Prints the hierarchy for the inputted class to show the inheritance
    information.

    :param obj | <class>
    """
    # recursively gather (indented class name, module) pairs for obj
    # and every base class, one dash of indent per inheritance level
    def collect_bases(subcls, indent=''):
        out = [(indent + subcls.__name__, subcls.__module__)]
        for b in subcls.__bases__:
            out += collect_bases(b, indent + '-')
        return out
    bases = collect_bases(obj)
    for base in bases:
        # Python 2 print statement: class name left-aligned, module right-aligned
        print '{:<40s}{:>70s}'.format(*base)
@staticmethod
def quickinit(filename, scope=None):
    """
    Loads the settings for the orb system from the inputted .orb filename.
    If the inputted scope variable is passed in, then the scope will be
    updated with the models from the system.

    :param filename | <str>
           scope    | <dict> || None

    :return <bool> | success
    """
    mgr = Manager.instance()

    # drop any previously loaded state before re-initializing
    mgr.clear()

    # load the new definition file (references included)
    if not mgr.load(filename, includeReferences=True):
        return False

    # expose the loaded models through the provided scope
    mgr.loadModels(scope)
    return True
""" Defines the hook required for the PyInstaller to use projexui with it. """
import os
import projex.pyi
# bake the mako statement files into python files
basepath = os.path.dirname(__file__)
for root, folders, files in os.walk(os.path.join(basepath)):
for file_ in files:
if file_.endswith('.mako'):
src = os.path.join(root, file_)
targ = src.replace('.', '_') + '.py'
with open(src, 'r') as f:
data = f.read()
with open(targ, 'w') as f:
f.write(u'TEMPLATE = r"""\n{0}\n"""'.format(data))
# load the importable objects
hiddenimports, _ = projex.pyi.collect(basepath)
"""
Defines the querying classes and abstractions for ORB
"""
from .query import Query
from .querycompound import QueryCompound
from .queryaggregate import QueryAggregate
from .querypattern import QueryPattern

Sorry, the diff of this file is too big to display

"""
Defines the global query building syntax for generating db
agnostic queries quickly and easily.
"""
from projex.enum import enum
from projex.lazymodule import lazy_import
orb = lazy_import('orb')
class QueryAggregate(object):
    """
    Describes an aggregated lookup (count / min / max / sum) performed
    against a table's column(s).
    """
    Type = enum(
        'Count',
        'Maximum',
        'Minimum',
        'Sum'
    )

    def __init__(self, typ, table, **options):
        self._type = typ
        self._table = table
        self._column = options.get('column', None)
        self._lookupOptions = orb.LookupOptions(**options)

    def columns(self):
        """
        Returns the column(s) this aggregate operates on.  When no
        explicit column was provided, the table's primary columns are
        used instead.

        :return [<orb.Column>, ..]
        """
        column = self._column
        if not column:
            return self._table.schema().primaryColumns()
        if isinstance(column, orb.Column):
            return column,
        # resolve a column name into its schema column
        return self._table.schema().column(column),

    def lookupOptions(self):
        """
        Returns the lookup options instance for this aggregate.

        :return <orb.LookupOptions>
        """
        return self._lookupOptions

    def table(self):
        """
        Returns the table associated with this aggregate.

        :return <orb.Table>
        """
        return self._table

    def type(self):
        """
        Returns the aggregation type for this aggregate.

        :return <int> | QueryAggregate.Type value
        """
        return self._type
"""
Defines the global query building syntax for generating db
agnostic queries quickly and easily.
"""
import logging
import projex.text
from projex.lazymodule import lazy_import
from projex.enum import enum
from projex.text import nativestring as nstr
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
log = logging.getLogger(__name__)
orb = lazy_import('orb')
class QueryCompound(object):
    """ Defines combinations of queries via either the AND or OR mechanism. """
    Op = enum(
        'And',
        'Or'
    )

    def __contains__(self, value):
        """
        Returns whether or not the query compound contains a query for the
        inputted column name.

        :param value | <variant>

        :return <bool>

        :usage |>>> from orb import Query as Q
               |>>> q = Q('testing') == True
               |>>> 'testing' in q
               |True
               |>>> 'name' in q
               |False
        """
        for query in self._queries:
            if value in query:
                return True
        return False

    def __nonzero__(self):
        # Python 2 truth protocol: a compound is truthy when not null
        return not self.isNull()

    def __str__(self):
        """
        Returns the string representation for this query instance

        :sa toString
        """
        return self.toString()

    def __init__(self, *queries, **options):
        self._queries = queries
        self._op = options.get('op', QueryCompound.Op.And)
        self._name = nstr(options.get('name', ''))

    def __and__(self, other):
        """
        Creates a new compound query using the
        QueryCompound.Op.And type.

        :param other <Query> || <QueryCompound>

        :return <QueryCompound>

        :sa and_

        :usage |>>> from orb import Query as Q
               |>>> query = (Q('test') != 1) & (Q('name') == 'Eric')
               |>>> print query
               |(test does_not_equal 1 and name is Eric)
        """
        return self.and_(other)

    def __hash__(self):
        # BUGFIX: a null compound has no XML form (toXml returns None), and
        # ElementTree.tostring(None) raises TypeError -- hash a stable
        # marker instead of crashing
        if self.isNull():
            return hash('')
        return hash(self.toXmlString())

    def __neg__(self):
        """
        Negates the current state of the query.

        :sa negate

        :return self

        :usage |>>> from orb import Query as Q
               |>>> query = (Q('test') == 1) & (Q('name') == 'Eric')
               |>>> print -query
               |NOT (test is 1 and name is Eric)
        """
        return self.negated()

    def __or__(self, other):
        """
        Creates a new compound query using the
        QueryCompound.Op.Or type.

        :param other <Query> || <QueryCompound>

        :return <QueryCompound>

        :sa or_

        :usage |>>> from orb import Query as Q
               |>>> query = (Q('test') != 1) | (Q('name') == 'Eric')
               |>>> print query
               |(test isNot 1 or name is Eric)
        """
        return self.or_(other)

    def and_(self, other):
        """
        Creates a new compound query using the
        QueryCompound.Op.And type.

        :param other <Query> || <QueryCompound>

        :return <QueryCompound>

        :sa __and__

        :usage |>>> from orb import Query as Q
               |>>> query = (Q('test') != 1).and_(Q('name') == 'Eric')
               |>>> print query
               |(test isNot 1 and name is Eric)
        """
        if other is None or other.isNull():
            return self
        elif self.isNull():
            return other

        # grow this object's list if the operator types are the same
        if self.operatorType() == QueryCompound.Op.And:
            queries = list(self._queries)
            queries.append(other)
            opts = {'op': QueryCompound.Op.And}
            return QueryCompound(*queries, **opts)

        # create a new compound
        return QueryCompound(self, other, op=QueryCompound.Op.And)

    def copy(self):
        """
        Returns a copy of this query compound.

        :return <QueryCompound>
        """
        out = QueryCompound()
        out._queries = [q.copy() for q in self._queries]
        out._op = self._op
        # BUGFIX: the name was previously dropped on copy
        out._name = self._name
        return out

    def columns(self, schema=None):
        """
        Returns any columns used within this query.

        :return [<orb.Column>, ..]
        """
        output = []
        for query in self.queries():
            output += query.columns(schema=schema)
        return list(set(output))

    def expandShortcuts(self, basetable=None):
        """
        Expands any shortcuts that were created for this query.  Shortcuts
        provide the user access to joined methods using the '.' accessor to
        access individual columns for referenced tables.

        :param basetable | <orb.Table> || None

        :usage |>>> from orb import Query as Q
               |>>> # lookup the 'username' of foreign key 'user'
               |>>> Q('user.username') == 'bob.smith'

        :return <orb.Query> || <orb.QueryCompound>
        """
        output = self.copy()
        queries = []
        rset = None
        for query in output._queries:
            query = query.expandShortcuts(basetable)

            # chain together joins into sub-queries
            if isinstance(query, orb.Query) and \
               isinstance(query.value(), orb.Query) and \
               query.value().table(basetable) != query.table(basetable):
                columns = [query.value().columnName()] if query.value().columnName() else ['id']
                new_rset = query.value().table(basetable).select(columns=columns)

                query = query.copy()
                query.setOperatorType(query.Op.IsIn)
                query.setValue(new_rset)

                if rset is not None and rset.table() == query.table(basetable):
                    rset.setQuery(query & rset.query())
                else:
                    queries.append(query)

                rset = new_rset

            # update the existing recordset in the chain
            elif rset is not None and \
                 ((isinstance(query, orb.Query) and rset.table() == query.table(basetable)) or
                  (isinstance(query, orb.QueryCompound) and rset.table() in query.tables(basetable))):
                rset.setQuery(query & rset.query())

            # clear out the chain and move on to the next query set
            else:
                rset = None
                queries.append(query)

        output._queries = queries
        return output

    def findValue(self, column, instance=1):
        """
        Looks up the value for the inputted column name for the given
        instance.  If the instance == 1, then this result will return the
        value and a 0 instance count, otherwise it will decrement the
        instance for a matching column to indicate it was found, but not at
        the desired instance.

        :param column   | <str>
               instance | <int>

        :return (<bool> success, <variant> value, <int> instance)
        """
        for query in self.queries():
            success, value, instance = query.findValue(column, instance)
            if success:
                return success, value, 0
        return False, None, instance

    def isNull(self):
        """
        Returns whether or not this compound is empty (every sub-query is
        null, or there are no sub-queries).

        :return <bool>
        """
        am_null = True
        for query in self._queries:
            if not query.isNull():
                am_null = False
                break
        return am_null

    def name(self):
        return self._name

    def negated(self):
        """
        Returns a negated copy of this compound.

        NOTE(review): this flips And <-> Or on the compound but does NOT
        negate the individual sub-queries, so it is not a full De Morgan
        negation -- confirm downstream callers rely on this behavior.

        :return <QueryCompound>
        """
        qcompound = QueryCompound(*self._queries)
        qcompound._op = QueryCompound.Op.And if self._op == QueryCompound.Op.Or else QueryCompound.Op.Or
        # preserve the compound's name through negation
        qcompound._name = self._name
        return qcompound

    def operatorType(self):
        """
        Returns the operator type for this compound.

        :return <QueryCompound.Op>
        """
        return self._op

    def or_(self, other):
        """
        Creates a new compound query using the
        QueryCompound.Op.Or type.

        :param other <Query> || <QueryCompound>

        :return <QueryCompound>

        :sa __or__

        :usage |>>> from orb import Query as Q
               |>>> query = (Q('test') != 1).or_(Q('name') == 'Eric')
               |>>> print query
               |(test isNot 1 or name is Eric)
        """
        if other is None or other.isNull():
            return self
        elif self.isNull():
            return other

        # grow this object's list if the operator types are the same
        if self.operatorType() == QueryCompound.Op.Or:
            queries = list(self._queries)
            queries.append(other)
            opts = {'op': QueryCompound.Op.Or}
            return QueryCompound(*queries, **opts)

        return QueryCompound(self, other, op=QueryCompound.Op.Or)

    def queries(self):
        """
        Returns the list of queries that are associated with
        this compound.

        :return <list> [ <Query> || <QueryCompound>, .. ]
        """
        return self._queries

    def removed(self, columnName):
        """
        Returns a copy of this compound with the query for the inputted
        column name removed.

        :param columnName | <str>

        :return <QueryCompound>
        """
        out = self.copy()
        new_queries = []
        for query in out._queries:
            new_queries.append(query.removed(columnName))
        out._queries = new_queries
        return out

    def setName(self, name):
        self._name = nstr(name)

    def setOperatorType(self, op):
        """
        Sets the operator type that this compound will use when joining
        together its queries.

        :param op <QueryCompound.Op>
        """
        self._op = op

    def tables(self, base=None):
        """
        Returns the tables that this query is referencing.

        :return [ <subclass of Table>, .. ]
        """
        output = []
        for query in self._queries:
            output += query.tables(base=base)
        return list(set(output))

    def toString(self):
        """
        Returns this query instance as a semi readable language
        query.

        :warning This method will NOT return a valid SQL statement.  The
                 backend classes will determine how to convert the Query
                 instance to whatever lookup code they need it to be.

        :return <str>
        """
        optypestr = QueryCompound.Op[self.operatorType()]
        op_type = ' %s ' % projex.text.underscore(optypestr)
        query = '(%s)' % op_type.join([q.toString() for q in self.queries()])
        return query

    def toDict(self):
        """
        Creates a dictionary representation of this query.

        :return <dict>
        """
        output = {}
        if self.isNull():
            return output

        output['type'] = 'compound'
        output['name'] = self.name()
        output['op'] = self.operatorType()

        queries = []
        for query in self.queries():
            queries.append(query.toDict())
        output['queries'] = queries
        return output

    def toXml(self, xparent=None):
        """
        Returns this query as an XML value.

        :param xparent | <xml.etree.ElementTree.Element> || None

        :return <xml.etree.ElementTree.Element> || None (when null)
        """
        if self.isNull():
            return None

        if xparent is None:
            xquery = ElementTree.Element('compound')
        else:
            xquery = ElementTree.SubElement(xparent, 'compound')

        xquery.set('name', nstr(self.name()))
        xquery.set('op', nstr(self.operatorType()))

        for query in self.queries():
            query.toXml(xquery)
        return xquery

    def toXmlString(self, indented=False):
        """
        Returns this query as an XML string.

        NOTE(review): calling this on a null compound passes None to
        ElementTree and raises -- callers should check isNull() first.

        :param indented | <bool>

        :return <str>
        """
        xml = self.toXml()
        if indented:
            projex.text.xmlindent(xml)
        return ElementTree.tostring(xml)

    def validate(self, record, table=None):
        """
        Validates the inputted record against this query compound.  An
        And-compound requires every sub-query to validate; an Or-compound
        requires at least one.

        :param record | <orb.Table>
        """
        op = self._op
        queries = self.queries()
        if not queries:
            return False

        for query in queries:
            valid = query.validate(record, table)
            if op == QueryCompound.Op.And and not valid:
                return False
            elif op == QueryCompound.Op.Or and valid:
                return True
        # all And sub-queries passed, or no Or sub-query matched
        return op == QueryCompound.Op.And

    @staticmethod
    def build(compound, queries):
        """
        Builds a compound based on the inputted compound string.  This
        should look like: ((QUERY_1 and QUERY_2) or (QUERY_3 and QUERY_4)).
        The inputted query dictionary should correspond with the keys in
        the string.  This method will be called as part of the
        Query.fromString method and probably never really needs to be
        called otherwise.

        :param compound | <str>
               queries  | {<str> key: <Query>, ..}

        :return <Query> || <QueryCompound>
        """
        indexStack = []
        compounds = {}
        new_text = projex.text.decoded(compound)

        for index, char in enumerate(projex.text.decoded(compound)):
            # open a new compound
            if char == '(':
                indexStack.append(index)

            # close a compound
            elif char == ')' and indexStack:
                openIndex = indexStack.pop()
                match = compound[openIndex + 1:index]
                if not match:
                    continue

                # recursively build the inner compound and substitute a key
                new_compound = QueryCompound.build(match, queries)
                key = 'QCOMPOUND_%i' % (len(compounds) + 1)
                compounds[key] = new_compound
                new_text = new_text.replace('(' + match + ')', key)

        new_text = new_text.strip('()')
        # a null Query absorbs the first section when &='d / |='d below
        query = orb.Query()
        last_op = 'and'
        for section in new_text.split():
            section = section.strip('()')

            # merge a compound
            if section in compounds:
                section_q = compounds[section]
            elif section in queries:
                section_q = queries[section]
            elif section in ('and', 'or'):
                last_op = section
                continue
            else:
                log.warning('Missing query section: %s', section)
                continue

            if query is None:
                query = section_q
            elif last_op == 'and':
                query &= section_q
            else:
                query |= section_q

        return query

    @staticmethod
    def fromDict(data):
        if data.get('type') != 'compound':
            return orb.Query.fromDict(data)

        compound = QueryCompound()
        compound.setName(data.get('name', ''))
        compound.setOperatorType(int(data.get('op', '1')))

        queries = []
        for subdata in data.get('queries', []):
            queries.append(orb.Query.fromDict(subdata))
        compound._queries = queries

        return compound

    @staticmethod
    def fromString(querystr):
        """
        Returns a new compound from the inputted query string.  This simply
        calls the Query.fromString method, as the two work the same.

        :param querystr | <str>

        :return <Query> || <QueryCompound> || None
        """
        return orb.Query.fromString(querystr)

    @staticmethod
    def fromXml(xquery):
        if xquery.tag == 'query':
            return orb.Query.fromXml(xquery)

        compound = QueryCompound()
        compound.setName(xquery.get('name', ''))
        compound.setOperatorType(int(xquery.get('op', '1')))

        queries = []
        for xsubquery in xquery:
            queries.append(orb.Query.fromXml(xsubquery))
        compound._queries = queries

        return compound

    @staticmethod
    def fromXmlString(xquery_str):
        """
        Returns a query from the XML string.

        :param xquery_str | <str>

        :return <orb.Query> || <orb.QueryCompound>
        """
        try:
            xml = ElementTree.fromstring(xquery_str)
        except (ExpatError, ElementTree.ParseError):
            # BUGFIX: ElementTree.fromstring raises ElementTree.ParseError,
            # which is NOT an ExpatError, so malformed XML previously
            # escaped this handler and crashed the caller
            return orb.Query()
        return orb.Query.fromXml(xml)

    @staticmethod
    def typecheck(obj):
        """
        Returns whether or not the inputted object is a QueryCompound
        object.

        :param obj <variant>

        :return <bool>
        """
        return isinstance(obj, QueryCompound)
"""
Defines the global query building syntax for generating db
agnostic queries quickly and easily.
"""
import os
import re
# regex fragments used to build QueryPattern expressions; the %s slot is
# filled with the capture-group name.  Both may be overridden through the
# environment.  Raw strings avoid invalid-escape warnings for \w / \. etc.
FIELD_SYNTAX = os.environ.get('ORB_FIELD_SYNTAX', r'(?P<%s>[\w_\.]+)')
VALUE_SYNTAX = os.environ.get('ORB_VALUE_SYNTAX',
                              r'(?P<%s>([\w\-_\.,]+|"[^"]+")|\[[^\]]+\])')
class QueryPattern(object):
    """
    Compiles a query syntax template (containing %(field)s / %(value)s /
    %(min)s / %(max)s placeholders) into a regular expression.
    """
    def __init__(self, syntax):
        self._syntax = syntax

        # substitute each placeholder with its named capture-group pattern
        substitutions = {
            'field': FIELD_SYNTAX % 'field',
            'value': VALUE_SYNTAX % 'value',
            'min': VALUE_SYNTAX % 'min',
            'max': VALUE_SYNTAX % 'max',
        }
        self._pattern = re.compile(syntax % substitutions)

    def pattern(self):
        """
        Returns the compiled regular expression for this pattern.

        :return <re.SRE_Pattern>
        """
        return self._pattern

    def syntax(self):
        """
        Returns the string syntax to be used for this pattern.

        :return <str>
        """
        return self._syntax
import orb
import projex.rest
# assign record encode/decoders
def record_encoder(py_obj):
    """
    projex.rest encoder hook: serializes orb records, record sets and
    queries into JSON-compatible data.

    :param py_obj | <variant>

    :return (<bool> handled, <variant> data)
    """
    # records and views serialize through their json() representation
    if orb.Table.recordcheck(py_obj) or orb.View.recordcheck(py_obj):
        return True, py_obj.json()

    # record sets serialize the same way
    if orb.RecordSet.typecheck(py_obj):
        return True, py_obj.json()

    # queries serialize through their dictionary form
    if orb.Query.typecheck(py_obj):
        return True, py_obj.toDict()

    # not an orb type -- let other encoders handle it
    return False, None

projex.rest.register(record_encoder)
""" Defines the meta information for a column within a table schema. """
from .column import Column
from .columnaggregator import ColumnAggregator
from .columnjoiner import ColumnJoiner
from .index import Index
from .pipe import Pipe
from .table import Table
from .tablegroup import TableGroup
from .tableschema import TableSchema
from .view import View
from .viewschema import ViewSchema
from .validator import RegexValidator, RequiredValidator, AbstractColumnValidator, AbstractRecordValidator
""" Defines the meta information for a column within a table schema. """
import datetime
import decimal
import logging
import projex.regex
import projex.text
import time
from projex.enum import enum
from projex.lazymodule import lazy_import
from projex.text import nativestring as nstr
from xml.etree import ElementTree
from ..common import ColumnType, RemovedAction
try:
from dateutil import parser as dateutil_parser
except ImportError:
dateutil_parser = None
try:
from webhelpers.html import tools as html_tools
except ImportError:
html_tools = None
try:
import bleach
except ImportError:
bleach = None
log = logging.getLogger(__name__)
orb = lazy_import('orb')
pytz = lazy_import('pytz')
errors = lazy_import('orb.errors')
class Column(object):
""" Used to define database schema columns when defining Table classes. """
# define default naming system
TEMPLATE_PRIMARY_KEY = 'id'
TEMPLATE_GETTER = '[name::camelHump::lower_first]'
TEMPLATE_SETTER = 'set[name::camelHump::upper_first::lstrip(Is)]'
TEMPLATE_FIELD = '[name::underscore::lower]'
TEMPLATE_DISPLAY = '[name::upper_first::words]'
TEMPLATE_INDEX = 'by[name::camelHump::upper_first]'
TEMPLATE_REVERSED = '[name::reversed::camelHump::lower_first]'
TEMPLATE_REFERENCE = '[name::underscore::lower]_id'
TEMPLATE_MAP = {
'getterName': TEMPLATE_GETTER,
'setterName': TEMPLATE_SETTER,
'fieldName': TEMPLATE_FIELD,
'displayName': TEMPLATE_DISPLAY,
'indexName': TEMPLATE_INDEX,
'primaryKey': TEMPLATE_PRIMARY_KEY,
}
Flags = enum('ReadOnly',
'Private',
'Referenced',
'Polymorphic',
'Primary',
'AutoIncrement',
'Required',
'Unique',
'Encrypted',
'Searchable',
'IgnoreByDefault',
'Translatable',
'CaseSensitive')
Kind = enum('Field',
'Joined',
'Aggregate',
'Proxy')
def __str__(self):
    # prefer the schema name, then the database field name, else a marker
    label = self.name() or self.fieldName()
    return label or '<< INVALID COLUMN >>'
def __init__(self, *args, **options):
    """
    Creates a new column definition.

    Supports two constructor forms: orb.Column(typ, name) or
    orb.Column(name, type=typ).

    :param options | naming ('getterName', 'setterName', 'fieldName',
                     'displayName', ...), referencing ('reference',
                     'reversed', ...), indexing ('indexed', ...), flag
                     ('primary', 'required', 'unique', ...) and misc
                     ('default', 'maxlength', 'joiner', 'aggregator', ...)
                     keyword options -- see the assignments below.
    """
    # support 2 constructor methods orb.Column(typ, name) or
    # orb.Column(name, type=typ)
    if len(args) == 2:
        options['type'], options['name'] = args
    elif len(args) == 1:
        options['name'] = args[0]

    # define required arguments
    self._name = options.get('name', '')
    self._type = options.get('type', None)
    self._schema = options.get('schema')
    self._engines = {}          # per-database engine cache (see engine())
    self._customData = {}
    self._timezone = None
    self._shortcut = options.get('shortcut', '')

    # set default values: any naming option not explicitly provided is
    # generated from the column name via the TEMPLATE_MAP templates
    ref = options.get('reference', '')
    for key in Column.TEMPLATE_MAP.keys():
        if key not in options:
            options[key] = Column.defaultDatabaseName(key, self._name, ref)

    # naming & accessor options
    self._getter = options.get('getter')
    self._getterName = options.get('getterName')
    self._setter = options.get('setter')
    self._setterName = options.get('setterName')
    self._fieldName = options.get('fieldName')
    self._displayName = options.get('displayName')

    # format options
    self._stringFormat = options.get('stringFormat', '')
    self._enum = options.get('enum', None)

    # validation options
    self._validators = []

    # referencing options
    self._referenced = options.get('referenced', False)
    self._reference = options.get('reference', '')
    self._referenceRemovedAction = options.get('referenceRemovedAction',
                                               RemovedAction.DoNothing)

    # reversed referencing options -- providing a reversedName implies
    # reversed lookup even when 'reversed' itself is not set
    self._reversed = options.get('reversed', bool(options.get('reversedName')))
    self._reversedName = options.get('reversedName', '')
    self._reversedCached = options.get('reversedCached', False)
    self._reversedCacheTimeout = options.get('reversedCacheTimeout', options.get('reversedCachedExpires', 0))

    # indexing options ('*CachedExpires' keys kept for backward compat)
    self._indexed = options.get('indexed', False)
    self._indexCached = options.get('indexCached', False)
    self._indexName = options.get('indexName')
    self._indexCacheTimeout = options.get('indexCacheTimeout', options.get('indexCachedExpires', 0))

    # additional properties
    self._default = options.get('default', None)
    self._maxlength = options.get('maxlength', 0)
    self._joiner = options.get('joiner', None)
    self._aggregator = options.get('aggregator', None)

    # flags options -- boolean keyword options fold into the bitmask
    flags = options.get('flags', 0)

    # by default, all columns are data columns
    if options.get('primary'):
        flags |= Column.Flags.Primary
    if options.get('private'):
        flags |= Column.Flags.Private
    if options.get('readOnly'):
        flags |= Column.Flags.ReadOnly
    if options.get('polymorphic'):
        flags |= Column.Flags.Polymorphic
    if options.get('autoIncrement'):
        flags |= Column.Flags.AutoIncrement
    # primary columns are implicitly required
    if options.get('required', options.get('primary')):
        flags |= Column.Flags.Required
    if options.get('unique'):
        flags |= Column.Flags.Unique
    # password columns are implicitly encrypted
    if options.get('encrypted', self._type == ColumnType.Password):
        flags |= Column.Flags.Encrypted
    if options.get('searchable'):
        flags |= Column.Flags.Searchable
    if options.get('ignoreByDefault'):
        flags |= Column.Flags.IgnoreByDefault
    if options.get('translatable'):
        flags |= Column.Flags.Translatable

    self._flags = flags

    # determine the kind of column that this column is
    if options.get('proxy'):
        self._kind = Column.Kind.Proxy
    elif self._joiner or self._shortcut:
        self._kind = Column.Kind.Joined
    elif self._aggregator:
        self._kind = Column.Kind.Aggregate
    else:
        self._kind = Column.Kind.Field
def aggregate(self):
    """
    Generates the query aggregate for this column when an aggregator
    has been assigned.

    :return <orb.QueryAggregate> || None
    """
    aggregator = self._aggregator
    return aggregator.generate(self) if aggregator else None
def aggregator(self):
    """
    Returns the aggregation instance associated with this column.  Unlike
    the <aggregate> function, this returns the aggregator object itself
    rather than the generated <orb.QueryAggregate>.

    :return <orb.ColumnAggregator> || None
    """
    return self._aggregator
def autoIncrement(self):
    """
    Returns whether or not this column should auto-increment in the
    database.

    :sa testFlag

    :return <bool>
    """
    return self.testFlag(Column.Flags.AutoIncrement)
def columnType(self, baseOnly=False):
    """
    Returns the type of data that this column represents, optionally
    reduced to its base type.

    :param baseOnly | <bool>

    :return <orb.common.ColumnType>
    """
    return ColumnType.base(self._type) if baseOnly else self._type
def copy(self):
    """
    Creates a copy of this column by round-tripping it through XML.

    :return <orb.Column>
    """
    xcolumn = self.toXml()
    return Column.fromXml(xcolumn)
def columnTypeText(self, baseOnly=False):
    """
    Returns the textual name of this column's type.

    :param baseOnly | <bool>

    :return <str>
    """
    return ColumnType[self.columnType(baseOnly=baseOnly)]
def customData(self, key, default=None):
    """
    Returns custom information that was assigned to this column for the
    inputted key.  If no value was assigned to the given key, the
    inputted default value will be returned.

    :param key     | <str>
           default | <variant>

    :return <variant>
    """
    data = self._customData
    return data.get(key, default)
def engine(self, db=None):
    """
    Returns the data engine for this column for the given database.
    Individual databases can define their own data engines.  If no
    database is defined, then the currently active database will be used.

    :param db | <orb.Database> || None

    :return <orb.ColumnEngine> || None
    """
    try:
        # fast path: engine already resolved and cached for this database
        return self._engines[db]
    except KeyError:
        engine = None
        # lookup the current database
        if db is None:
            db = orb.system.database()
        # lookup the database engine for this instance
        if db:
            try:
                # a type-keyed engine may have been registered directly
                engine = self._engines[db.databaseType()]
            except KeyError:
                engine = db.columnEngine(self)
            # cache the result so the next lookup takes the fast path
            self._engines[db] = engine
        return engine
def default(self, resolve=False):
    """
    Returns the default value for this column to return when generating
    new instances.

    :param resolve | <bool> | when True, coerce the stored default into a
                     value appropriate for this column's type; when False
                     the raw stored default is returned untouched.

    :return <variant>
    """
    default = self._default
    ctype = ColumnType.base(self.columnType())
    if not resolve:
        return default
    elif default == 'None':
        return None
    elif ctype == ColumnType.Bool:
        if type(default) == bool:
            return default
        else:
            # string defaults: only 'True' / '1' count as truthy
            return nstr(default) in ('True', '1')
    elif ctype in (ColumnType.Integer,
                   ColumnType.Enum,
                   ColumnType.BigInt,
                   ColumnType.Double):
        if type(default) in (str, unicode):
            try:
                # SECURITY NOTE: eval of a schema-supplied default -- safe
                # only while schema definitions are trusted input; a
                # literal_eval would be preferable here
                return eval(default)
            except SyntaxError:
                return 0
        elif default is None:
            return 0
        else:
            return default
    elif ctype == ColumnType.Date:
        if isinstance(default, datetime.date):
            return default
        elif default in ('today', 'now'):
            return datetime.date.today()
        return None
    elif ctype == ColumnType.Time:
        if isinstance(default, datetime.time):
            return default
        elif default == 'now':
            return datetime.datetime.now().time()
        else:
            return None
    elif ctype in (ColumnType.Datetime, ColumnType.Timestamp):
        if isinstance(default, datetime.datetime):
            return default
        elif default in ('today', 'now'):
            return datetime.datetime.now()
        else:
            return None
    # BUGFIX: this branch previously used '==' against the tuple
    # (DatetimeWithTimezone, Timestamp_UTC), which compares an int to a
    # tuple and is always False -- 'in' is required, matching the
    # Datetime/Timestamp branch above
    elif ctype in (ColumnType.DatetimeWithTimezone, ColumnType.Timestamp_UTC):
        if isinstance(default, datetime.datetime):
            return default
        elif default in ('today', 'now'):
            return datetime.datetime.utcnow()
        else:
            return None
    elif ctype == ColumnType.Interval:
        if isinstance(default, datetime.timedelta):
            return default
        elif default == 'now':
            return datetime.timedelta()
        else:
            return None
    elif ctype == ColumnType.Decimal:
        if default is None:
            return decimal.Decimal()
        return default
    elif ctype & (ColumnType.String |
                  ColumnType.Text |
                  ColumnType.Url |
                  ColumnType.Email |
                  ColumnType.Password |
                  ColumnType.Filepath |
                  ColumnType.Directory |
                  ColumnType.Xml |
                  ColumnType.Html |
                  ColumnType.Color):
        if default is None:
            return ''
        return default
    else:
        return None
def defaultOrder(self):
    """
    Returns the default sort direction for this column based on its type:
    string columns sort descending first, everything else ascending.

    :return <str>
    """
    return 'desc' if self.isString() else 'asc'
def displayName(self, autoGenerate=True):
    """
    Returns the display name for this column - if no name is explicitly
    set and autoGenerate is enabled, a capitalized-words form of the
    column name is produced.

    :param autoGenerate | <bool>

    :return <str>
    """
    if self._displayName or not autoGenerate:
        return self._displayName
    return projex.text.capitalizeWords(self.name())
def enum(self):
    """
    Returns the enumeration associated with this column, useful for
    automated validation of enumeration-typed values.

    :return <projex.enum.enum> || None
    """
    return self._enum
def getter(self):
    """
    Returns the getter method linked with this column; used by proxy
    columns.

    :return <callable> || None
    """
    return self._getter
def fieldName(self):
    """
    Returns the field name this column will have inside the database.
    The Column.TEMPLATE_FIELD template provides the default.

    :return <str>
    """
    return self._fieldName
def firstMemberSchema(self, schemas):
    """
    Returns the first schema within the list that this column is a
    member of, falling back to this column's own schema when none match.

    :param schemas | [<orb.TableSchema>, ..]

    :return <orb.TableSchema> || None
    """
    for candidate in schemas:
        if candidate.hasColumn(self):
            return candidate
    return self.schema()
def flags(self):
    """
    Returns the bitmask of flags that have been set for this column.

    :return <Column.Flags>
    """
    return self._flags
def getterName(self):
    """
    Returns the name for the getter method generated for this column.
    The Column.TEMPLATE_GETTER template provides the default.

    :return <str>
    """
    return self._getterName
def indexed(self):
    """
    Returns whether or not this column is indexed for quick lookup.

    :return <bool>
    """
    return self._indexed
def indexCached(self):
    """
    Returns whether or not the index for this column should cache its
    records.

    :return <bool>
    """
    return self._indexCached
def indexCacheTimeout(self):
    """
    Returns how long, in seconds, the client-side cache of this column's
    index results should be kept.

    :return <int> | seconds
    """
    return self._indexCacheTimeout
def indexName(self):
    """
    Returns the name to use when generating an index for this column.

    :return <str>
    """
    return self._indexName
def isAggregate(self):
    """
    Returns whether or not this column is an aggregate (i.e. has an
    aggregator assigned).

    :return <bool>
    """
    return self._aggregator is not None
def isEncrypted(self):
    """
    Returns whether or not the data in this column should be encrypted.

    :return <bool>
    """
    return self.testFlag(Column.Flags.Encrypted)
def isInteger(self):
    """
    Returns whether or not this column holds an integer-like type
    (Integer, BigInt or Enum).

    :return <bool>
    """
    base_type = ColumnType.base(self.columnType())
    return base_type in (ColumnType.Integer, ColumnType.BigInt, ColumnType.Enum)
def isKind(self, kind):
    """
    Returns whether or not this column matches the inputted kind.  A
    negative kind acts as an exclusion mask.

    :param kind | <orb.Column.Kind>

    :return <bool>
    """
    if kind >= 0:
        return bool(self._kind & kind)
    return not bool(self._kind & ~kind)
def isJoined(self):
    """
    Returns whether or not this column is a joined column.  Joined
    columns are not actually part of the database table but are joined
    in via a query during selection.

    :return <bool>
    """
    return self._joiner is not None
def isMatch(self, name):
    """
    Returns whether or not this column's text info matches the
    inputted name (accessor name, field name, display name, or the
    camelHump/underscore variants of the accessor name).

    :param name | <str>
    """
    if name == self:
        return True
    my_name = self.name()
    candidates = (my_name,
                  my_name.strip('_'),
                  projex.text.camelHump(my_name),  # support both naming styles for string lookup
                  projex.text.underscore(my_name),
                  self.displayName(),
                  self.fieldName())
    return name in candidates
def isMemberOf(self, schemas):
    """
    Returns whether or not this column is a member of any of the
    given schemas.

    :param schemas | [<orb.TableSchema>, ..] || <orb.TableSchema>

    :return <bool>
    """
    # accept a single schema as well as a collection of them
    if type(schemas) not in (tuple, list, set):
        schemas = (schemas,)
    return any(schema.hasColumn(self) for schema in schemas)
def isPolymorphic(self):
    """
    Returns whether or not this column defines the polymorphic class
    for its schema.  When True together with a reference model, the
    referenced model's text is used to inflate records to their
    proper class.

    :sa testFlag

    :return <bool>
    """
    return self.testFlag(Column.Flags.Polymorphic)
def isPrivate(self):
    """
    Returns whether or not this column is treated as private.

    :sa testFlag

    :return <bool>
    """
    return self.testFlag(Column.Flags.Private)
def isProxy(self):
    """
    Returns whether or not this column is a proxy column.

    :sa isKind

    :return <bool>
    """
    return self.isKind(Column.Kind.Proxy)
def isReadOnly(self):
    """
    Returns whether or not this column is read-only.

    :sa testFlag

    :return <bool>
    """
    return self.testFlag(Column.Flags.ReadOnly)
def isReference(self):
    """
    Returns whether or not this column is a foreign-key reference to
    another table.

    :return <bool>
    """
    return bool(self._reference)
def isReferenced(self):
    """
    Returns whether or not this column was loaded from an external
    (referenced) definition file.

    :return <bool>
    """
    return self._referenced
def isReversed(self):
    """
    Returns whether or not this column generates a reverse lookup
    method on its reference model.

    :return <bool>
    """
    return self._reversed
def isSearchable(self):
    """
    Returns whether or not this column participates in record-set
    searches as a matchable entry.

    :sa testFlag

    :return <bool>
    """
    return self.testFlag(Column.Flags.Searchable)
def isString(self):
    """
    Returns whether or not this column is one of the string-based
    column types.

    :return <bool>
    """
    # union of every string-flavored column type
    string_mask = (ColumnType.String |
                   ColumnType.Text |
                   ColumnType.Url |
                   ColumnType.Email |
                   ColumnType.Password |
                   ColumnType.Filepath |
                   ColumnType.Directory |
                   ColumnType.Xml |
                   ColumnType.Html |
                   ColumnType.Color)
    return (ColumnType.base(self.columnType()) & string_mask) != 0
def isTranslatable(self):
    """
    Returns whether or not this column stores translatable content.

    :sa testFlag

    :return <bool>
    """
    return self.testFlag(Column.Flags.Translatable)
def iterFlags(self):
    """
    Returns the individual flags that are currently set on this column.

    :return [<Column.Flags>, ..]
    """
    active = []
    for flag in Column.Flags.values():
        if self.testFlag(flag):
            active.append(flag)
    return active
def kind(self):
    """
    Returns the general kind of column this is (field, joined,
    aggregate, proxy, ...).

    :return <orb.Column.Kind>
    """
    return self._kind
def joiner(self):
    """
    Returns the joiner query that is used to define what this columns
    value will be.

    :return (<orb.Column>, <orb.Query>) || None
    """
    joiner = self._joiner
    # dynamically generate a join query based on the inputted function
    if type(joiner).__name__ == 'function':
        return joiner(self)
    # otherwise, if there is a shortcut, generate that
    # NOTE(review): a non-callable joiner (e.g. the documented
    # (<orb.Column>, <orb.Query>) tuple) is never returned directly here --
    # this branch always builds a query from the shortcut instead.  Confirm
    # whether tuple joiners are intended to be supported.
    else:
        return orb.Query(self.shortcut())
def maxlength(self):
    """
    Returns the maximum length for this column (used by varchar-style
    data types).

    :return <int>
    """
    return self._maxlength
def memberOf(self, schemas):
    """
    Yields each schema from the inputted list that this column is a
    member of.

    :param schemas | [<orb.TableSchema>, ..]

    :return <generator> [<orb.TableSchema>, ..]
    """
    for candidate in schemas:
        if candidate.hasColumn(self):
            yield candidate
def name(self):
    """
    Returns the accessor name used when referencing this column
    throughout the app.

    :return <str>
    """
    return self._name
def primary(self):
    """
    Returns whether or not this column is one of the primary keys for
    its schema.

    :sa testFlag

    :return <bool>
    """
    return self.testFlag(Column.Flags.Primary)
def reference(self):
    """
    Returns the name of the model this column points to when it acts
    as a foreign key.

    :return <str>
    """
    return self._reference
def referenceRemovedAction(self):
    """
    Returns the action taken by records of this column when the
    referenced record is removed.

    :return <ReferencedAction>
    """
    return self._referenceRemovedAction
def referenceModel(self):
    """
    Returns the model class that this column references, resolved
    through the orb system registry.

    :return <Table> || None

    :raises errors.TableNotFound when the reference cannot be resolved
    """
    if not self.isReference():
        return None
    # look the model up within this schema's database scope
    db = self.schema().databaseName() or None
    model = orb.system.model(self.reference(), database=db)
    if not model:
        raise errors.TableNotFound(self.reference())
    return model
def restoreValue(self, value, options=None):
    """
    Restores the value from a table cache for usage, converting
    database-side representations (UTC datetimes, timestamps, raw
    strings) into their application-side forms.

    :param value | <variant>
           options | <orb.ContextOptions> || None

    :return <variant>
    """
    coltype = ColumnType.base(self.columnType())
    # always allow NULL types
    if value is None:
        return value
    # restore a datetime timezone value
    if isinstance(value, datetime.datetime) and \
       coltype == ColumnType.DatetimeWithTimezone:
        tz = self.timezone(options)
        if tz is not None:
            # naive datetimes are assumed to be stored in UTC -- TODO confirm
            if value.tzinfo is None:
                base_tz = orb.system.baseTimezone()
                # the machine timezone and preferred timezone match, so create off utc time
                if base_tz == tz:
                    value = tz.fromutc(value)
                # convert the server timezone to a preferred timezone
                else:
                    value = base_tz.fromutc(value).astimezone(tz)
            else:
                value = value.astimezone(tz)
        else:
            log.warning('No local timezone defined')
    # restore a timestamp value (numeric epoch seconds -> datetime)
    elif coltype in (ColumnType.Timestamp, ColumnType.Timestamp_UTC) and isinstance(value, (int, float)):
        value = datetime.datetime.fromtimestamp(value)
    # restore a string value (decode bytes to unicode)
    elif self.isString():
        value = projex.text.decoded(value)
    return value
def required(self):
    """
    Returns whether or not a value is required for this column when
    creating records in the database.

    :sa testFlag

    :return <bool>
    """
    return self.testFlag(Column.Flags.Required)
def returnType(self):
    """
    Returns the documentation string describing the Python type that
    accessors for this column are expected to return.

    :return <str>
    """
    ct = self.columnType()
    # direct column-type -> type-string lookups
    simple = {
        ColumnType.Bool: '<bool>',
        ColumnType.Decimal: '<float>',
        ColumnType.Double: '<float>',
        ColumnType.Integer: '<int>',
        ColumnType.BigInt: '<int>',
        ColumnType.Enum: '<int> (enum)',
        ColumnType.Datetime: '<datetime.datetime>',
        ColumnType.Timestamp: '<datetime.datetime>',
        ColumnType.Timestamp_UTC: '<datetime.datetime>',
        ColumnType.Date: '<datetime.date> (without tz_info)',
        ColumnType.Interval: '<datetime.timedelta>',
        ColumnType.Time: '<datetime.time>',
        ColumnType.DatetimeWithTimezone: '<datetime.datetime> (with tz_info)',
        ColumnType.Image: '<unicode> (image bytea)',
        ColumnType.ByteArray: '<bytea>',
        ColumnType.Dict: '<dict>',
        ColumnType.Pickle: '<variant> (pickle data)',
        ColumnType.Query: '<orb.query.Query>',
    }
    if ct == ColumnType.ForeignKey:
        typ = '<orb.schema.dynamic.{0}> || <variant> (primary key when not inflated)'.format(self.reference())
    elif ct in simple:
        typ = simple[ct]
    elif self.isString():
        typ = '<unicode>'
    else:
        typ = '<variant>'
    # optional columns may also return None
    if not self.required():
        typ += ' || None'
    return typ
def reversedCached(self):
    """
    Returns whether or not the reverse lookup generated for this
    column caches its results.

    :return <bool>
    """
    return self._reversedCached
def reversedCacheTimeout(self):
    """
    Returns how long, in seconds, the reverse-lookup cache lives
    before expiring.  A value of 0 means it never expires.

    :return <int> | seconds
    """
    return self._reversedCacheTimeout
def reversedName(self):
    """
    Returns the method name generated for the reverse accessor on the
    reference table.

    :return <str>
    """
    return self._reversedName
def shortcut(self):
    """
    Returns the shortcut path for this column, used to traverse a
    relationship for the actual value as a join from another table.

    :return <str>
    """
    return self._shortcut
def storeValue(self, value):
    """
    Converts the value to one that is safe to store on a record within
    the record values dictionary (strings parsed, timezone-aware
    datetimes normalized to naive UTC, timestamps to epoch seconds,
    strings sanitized / encrypted as configured).

    :param value | <variant>

    :return <variant>
    """
    coltype = ColumnType.base(self.columnType())
    # ensure we have a value -- parse raw text into the column's type first
    if isinstance(value, (str, unicode)):
        value = self.valueFromString(value)
    if value is None:
        return value
    # store timezone information
    elif coltype == ColumnType.DatetimeWithTimezone:
        if isinstance(value, datetime.datetime):
            # match the server information
            tz = orb.system.baseTimezone() or self.timezone()
            if tz is not None:
                # ensure we have some timezone information before converting to UTC time
                if value.tzinfo is None:
                    value = tz.localize(value, is_dst=None)
                # persisted form is naive UTC
                value = value.astimezone(pytz.utc).replace(tzinfo=None)
            else:
                log.warning('No local timezone defined.')
    # store timestamp information as epoch seconds
    elif coltype in (ColumnType.Timestamp, ColumnType.Timestamp_UTC):
        if isinstance(value, datetime.datetime):
            value = time.mktime(value.timetuple())
    # encrypt the value if necessary
    elif self.isEncrypted():
        return orb.system.encrypt(value)
    # store non-html content -- sanitize markup when the tools are available
    # NOTE(review): indentation was reconstructed; bleach.clean and
    # strip_tags are treated as independent guards -- confirm nesting
    elif self.isString():
        if bleach:
            value = bleach.clean(value)
        if coltype != ColumnType.Html and html_tools:
            value = html_tools.strip_tags(value)
    return value
def schema(self):
    """
    Returns the table schema that this column is linked to in the
    database.

    :return <TableSchema>
    """
    return self._schema
def setterName(self):
    """
    Returns the setter method name generated for this column (seeded
    from Column.TEMPLATE_SETTER by default).

    :return <str>
    """
    return self._setterName
def setter(self):
    """
    Returns the setter callable linked with this column (used by
    proxy columns).

    :return <callable> || None
    """
    return self._setter
def setAutoIncrement(self, state):
    """
    Sets whether or not this column auto-increments.

    :sa setFlag

    :param state | <bool>
    """
    self.setFlag(Column.Flags.AutoIncrement, state)
def setColumnType(self, columnType):
    """
    Sets the database column type represented by this column.

    :param columnType | <ColumnType>
    """
    self._type = columnType
def setCustomData(self, key, value):
    """
    Stores the given value in this column's custom data map under the
    inputted key.

    :param key | <str>
           value | <variant>
    """
    self._customData[nstr(key)] = value
def setDefault(self, default):
    """
    Sets the default value for this column.

    :param default | <str>
    """
    self._default = default
def setDisplayName(self, displayName):
    """
    Sets the user-facing display name for this column.  A None value
    is ignored, preserving the current name.

    :param displayName | <str> || None
    """
    if displayName is None:
        return
    self._displayName = displayName
def setEngine(self, db_or_type, engine):
    """
    Registers the column engine used for this column within the given
    database (or database type).

    :param db_or_type | <orb.Database> || <str>
           engine | <orb.ColumnEngine>
    """
    self._engines[db_or_type] = engine
def setEnum(self, cls):
    """
    Associates an enumeration with this column.  Optional, but useful
    for validation and the ORB system's automated features.

    :param cls | <projex.enum.enum> || None
    """
    self._enum = cls
def setEncrypted(self, state):
    """
    Sets whether or not this column's data is encrypted in the
    database.

    :sa setFlag

    :param state | <bool>
    """
    self.setFlag(Column.Flags.Encrypted, state)
def setFieldName(self, fieldName):
    """
    Sets the database field name for this column.  A None value is
    ignored, preserving the current field name.

    :param fieldName | <str> || None
    """
    if fieldName is None:
        return
    self._fieldName = fieldName
def setFlag(self, flag, state=True):
    """
    Turns the given flag on or off within this column's flag bitmask.

    :param flag | <Column.Flags>
           state | <bool>
    """
    if state:
        self._flags = self._flags | flag
    else:
        self._flags = self._flags & ~flag
def setFlags(self, flags):
    """
    Replaces the full flag bitmask for this column.

    :param flags | <Column.Flags>
    """
    self._flags = flags
def setName(self, name):
    """
    Sets the accessor name for this column.

    :param name | <str>
    """
    self._name = name
def setGetterName(self, getterName):
    """
    Sets the getter method name for this column.  A None value is
    ignored, preserving the current name.

    :param getterName | <str> || None
    """
    if getterName is None:
        return
    self._getterName = getterName
def setIndexed(self, state):
    """
    Sets whether or not a lookup index is created for this column.

    :param state | <bool>
    """
    self._indexed = state
def setIndexCached(self, cached):
    """
    Sets whether or not the index for this column caches its database
    lookups.

    :param cached | <bool>
    """
    self._indexCached = cached
def setIndexCacheTimeout(self, seconds):
    """
    Sets how long, in seconds, the client-side cache of this column's
    index results is kept.

    :param seconds | <int>
    """
    self._indexCacheTimeout = seconds
def setIndexName(self, indexName):
    """
    Sets the index name for this column.  A None value is ignored,
    preserving the current name.

    :param indexName | <str> || None
    """
    if indexName is None:
        return
    self._indexName = indexName
def setJoiner(self, joiner):
    """
    Sets the joiner query for this column.  Assigning a joiner marks
    the column as joined, read-only and not a database field.

    :param joiner | (<orb.Column>, <orb.Query>) || <callable> || None
    """
    self._joiner = joiner
    if joiner is not None:
        self._kind = Column.Kind.Joined
        self.setFlag(Column.Flags.ReadOnly)
        self.setFlag(Column.Flags.Field, False)  # joiner columns are not fields
def setMaxlength(self, length):
    """
    Sets the maximum length for this column (used for string column
    types).

    :param length | <int>
    """
    self._maxlength = length
def setPolymorphic(self, state):
    """
    Sets whether or not this column defines a polymorphic mapper for
    the table.

    :sa setFlag

    :param state | <bool>
    """
    self.setFlag(Column.Flags.Polymorphic, state)
def setPrimary(self, primary):
    """
    Sets whether or not this column is one of the primary columns for
    its table.

    :sa setFlag

    :param primary | <bool>
    """
    self.setFlag(Column.Flags.Primary, primary)
def setPrivate(self, state):
    """
    Sets whether or not this column is treated as private.

    :sa setFlag

    :param state | <bool>
    """
    self.setFlag(Column.Flags.Private, state)
def setAggregator(self, aggregator):
    """
    Sets the query aggregator for this column.  Assigning one marks
    the column as an aggregation value (it does not explicitly live
    on the Table class) and makes it read-only.

    :param aggregator | <orb.ColumnAggregator> || None
    """
    self._aggregator = aggregator
    if aggregator is not None:
        self._kind = Column.Kind.Aggregate
        self.setFlag(Column.Flags.ReadOnly)
def setReadOnly(self, state):
    """
    Sets whether or not this column is a read-only attribute.

    :sa setFlag

    :param state | <bool>
    """
    self.setFlag(Column.Flags.ReadOnly, state)
def setReference(self, reference):
    """
    Sets the name of the table schema this column refers to as a
    foreign key.

    :param reference | <str>
    """
    self._reference = reference
def setReferenceRemovedAction(self, referencedAction):
    """
    Sets the action taken by records of this column when the
    referenced record is removed.

    :param referencedAction | <ReferencedAction>
    """
    self._referenceRemovedAction = referencedAction
def setRequired(self, required):
    """
    Sets whether or not this column is required in the database.

    :sa setFlag

    :param required | <bool>
    """
    self.setFlag(Column.Flags.Required, required)
def setReversed(self, state):
    """
    Sets whether or not this column generates a reverse accessor on
    its reference table.

    :param state | <bool>
    """
    self._reversed = state
def setReversedCached(self, state):
    """
    Sets whether or not the reverse lookup for this column caches its
    results.

    :param state | <bool>
    """
    self._reversedCached = state
def setReversedCacheTimeout(self, seconds):
    """
    Sets how long, in seconds, the client-side reverse-lookup cache
    lives before an update is requested from the server.  A value of
    0 means it never expires.

    :param seconds | <int>
    """
    self._reversedCacheTimeout = seconds
def setReversedName(self, reversedName):
    """
    Sets the method name generated for the reverse lookup on the
    reference table.  A None value is ignored.

    :param reversedName | <str> || None
    """
    if reversedName is None:
        return
    self._reversedName = reversedName
def setSearchable(self, state):
    """
    Sets whether or not this column participates in record-set
    searches.

    :sa setFlag

    :param state | <bool>
    """
    self.setFlag(Column.Flags.Searchable, state)
def setSetterName(self, setterName):
    """
    Sets the setter method name for this column.  A None value is
    ignored, preserving the current name.

    :param setterName | <str> || None
    """
    if setterName is None:
        return
    self._setterName = setterName
def setShortcut(self, shortcut):
    """
    Sets the shortcut path for this column.  A non-empty shortcut (or
    an existing joiner) marks the column as joined; clearing the
    shortcut on a joined column reverts it to a plain field.

    :param shortcut | <str>
    """
    self._shortcut = shortcut
    if self._joiner or shortcut:
        self._kind = Column.Kind.Joined
    elif self._kind == Column.Kind.Joined:
        self._kind = Column.Kind.Field
def setStringFormat(self, formatter):
    """
    Sets the Python string-format template used when displaying this
    column's value.

    :param formatter | <str>
    """
    self._stringFormat = formatter
def setTimezone(self, timezone):
    """
    Sets the timezone associated directly with this column.

    :sa <orb.Manager.setTimezone>

    :param timezone | <pytz.tzfile> || None
    """
    self._timezone = timezone
def setTranslatable(self, state):
    """
    Sets whether or not this column stores translatable content.

    :sa setFlag

    :param state | <bool>
    """
    self.setFlag(Column.Flags.Translatable, state)
def setUnique(self, state):
    """
    Sets whether or not this column must be unique in the database.

    :sa setFlag

    :param state | <bool>
    """
    self.setFlag(Column.Flags.Unique, state)
def stringFormat(self):
    """
    Returns the Python string-format template used when displaying
    this column's value.

    :return <str>
    """
    return self._stringFormat
def testFlag(self, flag):
"""
Tests to see if this column has the inputted flag set.
:param flag | <Column.Flags>
"""
return bool(self.flags() & flag) if flag >= 0 else not bool(self.flags() & ~flag)
def timezone(self, options=None):
    """
    Returns the timezone associated specifically with this column,
    falling back on the schema/system timezone when none is set.

    :sa <orb.Manager>

    :param options | <orb.ContextOptions> || None

    :return <pytz.tzfile> || None
    """
    if self._timezone is not None:
        return self._timezone
    return self.schema().timezone(options)
def toolTip(self, context='normal'):
    """
    Builds an HTML tooltip string describing this column for UI display.

    :param context | <str> | 'normal' for general api usage,
                             'index' for the generated index accessor,
                             'reverse' for the generated reverse accessor

    :return <str> | empty string when the context does not apply
    """
    # per-column-type example input values used in the sample code
    example_values = {
        'ForeignKey': '{0}.all().first()'.format(self.reference()),
        'Integer': '0',
        'String': '"example string"',
        'Text': '"example text"',
        'Bool': 'True',
        'Email': 'me@example.com',
        'Password': 'ex@mp1e',
        'BigInt': '10',
        'Date': 'datetime.date.today()',
        'Datetime': 'datetime.datetime.now()',
        'DatetimeWithTimezone': 'datetime.datetime.now()',
        'Time': 'datetime.time(12, 0, 0)',
        'Timestamp': 'datetime.datetime.now()',
        'Timestamp_UTC': 'datetime.datetime.now()',
        'Interval': 'datetime.timedelta(seconds=1)',
        'Decimal': '0.0',
        'Double': '0.0',
        'Enum': 'Types.Example',
        'Url': '"http://example.com"',
        'Filepath': '"/path/to/example.png"',
        'Directory': '"/path/to/example/"',
        'Xml': '"<example></example>"',
        'Html': '"<html><example></example></html>"',
        'Color': '"#000"',
        'Image': 'QtGui.QPixmap("/path/to/example.png")',
        'ByteArray': 'QtCore.QByteArray()',
        'Pickle': '{"example": 10}',
        'Dict': '{"example": 10}',
        'Query': 'orb.Query(column) == value'
    }
    # example return values differ from inputs only for the inflated types
    example_returns = example_values.copy()
    example_returns['ForeignKey'] = '&lt;{0}&gt;'.format(self.reference())
    example_returns['Query'] = '&lt;orb.Query&gt;'
    coltype = self.columnTypeText()
    default = projex.text.underscore(self.name())
    # substitution values for the tooltip templates below
    opts = {'name': self.name(),
            'type': self.reference() if coltype == 'ForeignKey' else self.columnTypeText(),
            'field': self.fieldName(),
            'getter': self.getterName(),
            'setter': self.setterName(),
            'schema': self.schema().name(),
            'display': self.displayName(),
            'value': example_values.get(coltype, default),
            'default': default,
            'return': example_returns.get(coltype, example_values.get(coltype, default)),
            'record': projex.text.underscore(self.schema().name()),
            'ref_record': projex.text.underscore(self.reference())}
    # show general api usage information
    if context == 'normal':
        tip = '''\
<b>{schema}.{name} <small>({type})</small></b>
<pre>
>>> # example api usage
>>> {record} = {schema}()
>>> {record}.{setter}({value})
>>> {record}.{getter}()
{return}
>>> # meta data
>>> column = {schema}.schema().column('{name}')
>>> # ui display info
>>> column.displayName()
'{display}'
>>> # database field info
>>> column.fieldName()
'{field}'
</pre>'''
    # show indexed information
    elif context == 'index' and self.indexed():
        opts['index'] = self.indexName()
        if self.unique():
            tip = '''\
<b>{schema}.{name} <small>({schema} || None)</small></b>
<pre>
>>> # lookup record by index
>>> {schema}.{index}({default})
&lt;{schema}&gt;
</pre>
'''
        else:
            tip = '''\
<b>{schema}.{name} <small>(RecordSet([{schema}, ..]))</small></b>
<pre>
>>> # lookup records by index
>>> {schema}.{index}({default})
&lt;orb.RecordSet([&lt;{schema}&gt;, ..])&gt;
</pre>
'''
    # show reversed information
    elif context == 'reverse' and self.isReversed():
        opts['reference'] = self.reference()
        opts['reverse'] = self.reversedName()
        if self.unique():
            tip = '''\
<b>{reference}.{reverse} <small>({reference})</small></b><br>
<pre>
>>> # look up {schema} record through the reverse accessor
>>> {ref_record} = {reference}()
>>> {ref_record}.{reverse}()
&lt;{schema}&gt;
</pre>
'''
        else:
            tip = '''\
<b>{reference}.{reverse} <small>(RecordSet([{reference}, ..]))</small></b><br>
<pre>
>>> # look up {schema} records through the reverse accessor
>>> {ref_record} = {reference}()
>>> {ref_record}.{reverse}()
&lt;orb.RecordSet([&lt;{schema}&gt;, ..])&gt;
</pre>
'''
    # unrecognized context -- no tooltip
    else:
        tip = ''
    return tip.format(**opts)
def toXml(self, xparent=None):
    """
    Saves the data about this column out to xml as a child node for the
    inputted parent.

    :param xparent | <xml.etree.ElementTree.Element>

    :return <xml.etree.ElementTree.Element>
    """
    if xparent is not None:
        xcolumn = ElementTree.SubElement(xparent, 'column')
    else:
        xcolumn = ElementTree.Element('column')
    # save the properties
    xcolumn.set('name', self.name())
    # store as elements
    ElementTree.SubElement(xcolumn, 'type').text = ColumnType[self.columnType()]
    ElementTree.SubElement(xcolumn, 'display').text = self.displayName(False)
    ElementTree.SubElement(xcolumn, 'getter').text = self.getterName()
    ElementTree.SubElement(xcolumn, 'setter').text = self.setterName()
    ElementTree.SubElement(xcolumn, 'field').text = self._fieldName
    if self._shortcut:
        ElementTree.SubElement(xcolumn, 'shortcut').text = self._shortcut
    # store string format options
    if self._stringFormat:
        ElementTree.SubElement(xcolumn, 'format').text = self._stringFormat
    if self.default():
        ElementTree.SubElement(xcolumn, 'default').text = nstr(self.default())
    # store additional options
    if self.maxlength():
        ElementTree.SubElement(xcolumn, 'maxlen').text = nstr(self.maxlength())
    # store indexing options
    if self.indexed():
        xindex = ElementTree.SubElement(xcolumn, 'index')
        xindex.text = self.indexName()
        if self.indexCached():
            xindex.set('cached', nstr(self.indexCached()))
            xindex.set('timeout', nstr(self.indexCacheTimeout()))
    # store flags (one child element per set flag)
    xflags = ElementTree.SubElement(xcolumn, 'flags')
    for flag in Column.Flags.keys():
        has_flag = self.testFlag(Column.Flags[flag])
        if has_flag:
            ElementTree.SubElement(xflags, flag)
    # store referencing options
    if self.reference():
        xrelation = ElementTree.SubElement(xcolumn, 'relation')
        ElementTree.SubElement(xrelation, 'table').text = self.reference()
        ElementTree.SubElement(xrelation, 'removedAction').text = nstr(self.referenceRemovedAction())
        if self.isReversed():
            xreversed = ElementTree.SubElement(xrelation, 'reversed')
            xreversed.text = self.reversedName()
            if self.reversedCached():
                xreversed.set('cached', nstr(self.reversedCached()))
                xreversed.set('timeout', nstr(self.reversedCacheTimeout()))
    return xcolumn
def unique(self):
    """
    Returns whether or not this column must hold unique values in the
    database.

    :sa testFlag

    :return <bool>
    """
    return self.testFlag(Column.Flags.Unique)
def validate(self, value):
    """
    Validates the inputted value against this column's rules.  Each
    registered validator raises a validation error on failure.

    :param value | <variant>

    :return <bool> success
    """
    for checker in self.validators():
        checker.validate(self, value)
    return True
def validators(self):
    """
    Returns the validators registered for this column, appending an
    implicit required-value validator for required, non-auto-increment
    columns.

    :return [<orb.Validator>, ..]
    """
    extras = []
    if self.required() and not self.autoIncrement():
        extras.append(orb.RequiredValidator())
    return self._validators + extras
def valueFromString(self, value, extra=None, db=None):
    """
    Converts the inputted string text to a value that matches the type from
    this column type.

    :param value | <str>
           extra | <variant> | optional strptime format for date/time types
           db    | <orb.Database> || None

    :return <variant>
    """
    # convert the value from a string value via the data engine system
    engine = self.engine(db)
    if engine:
        return engine.fromString(value)
    # convert the value to a string using default values
    coltype = ColumnType.base(self.columnType())
    if coltype == ColumnType.Date:
        if dateutil_parser:
            return dateutil_parser.parse(value).date()
        else:
            extra = extra or '%Y-%m-%d'
            time_struct = time.strptime(value, extra)
            # bugfix: struct_time exposes tm_mon/tm_mday, not tm_month/tm_day
            return datetime.date(time_struct.tm_year,
                                 time_struct.tm_mon,
                                 time_struct.tm_mday)
    elif coltype == ColumnType.Time:
        if dateutil_parser:
            return dateutil_parser.parse(value).time()
        else:
            # bugfix: '%h:%m:%s' is not a valid strptime pattern --
            # use the hour/minute/second directives
            extra = extra or '%H:%M:%S'
            time_struct = time.strptime(value, extra)
            return datetime.time(time_struct.tm_hour,
                                 time_struct.tm_min,
                                 time_struct.tm_sec)
    elif coltype in (ColumnType.Datetime, ColumnType.DatetimeWithTimezone):
        if dateutil_parser:
            return dateutil_parser.parse(value)
        else:
            # bugfix: '%Y-%m-%d %h:%m:s' was malformed; also fix the
            # struct_time field names (tm_mon/tm_mday/tm_min)
            extra = extra or '%Y-%m-%d %H:%M:%S'
            time_struct = time.strptime(value, extra)
            return datetime.datetime(time_struct.tm_year,
                                     time_struct.tm_mon,
                                     time_struct.tm_mday,
                                     time_struct.tm_hour,
                                     time_struct.tm_min,
                                     time_struct.tm_sec)
    elif coltype in (ColumnType.Timestamp, ColumnType.Timestamp_UTC):
        try:
            return datetime.datetime.fromtimestamp(float(value))
        except StandardError:
            if dateutil_parser:
                return dateutil_parser.parse(value)
            else:
                # bugfix: datetime.min is an attribute, not a callable
                return datetime.datetime.min
    elif coltype == ColumnType.Bool:
        return nstr(value).lower() == 'true'
    elif coltype in (ColumnType.Integer,
                     ColumnType.Double,
                     ColumnType.Decimal,
                     ColumnType.BigInt,
                     ColumnType.Enum):
        try:
            value = projex.text.safe_eval(value)
        except ValueError:
            value = 0
        return value
    return nstr(value)
def valueToString(self, value, extra=None, db=None):
    """
    Converts the inputted value to string text matching this column
    type (inverse of valueFromString).

    :sa engine

    :param value | <variant>
           extra | <str> || None | optional strftime format
           db    | <orb.Database> || None

    :return <str>
    """
    # convert the value to a string value via the data engine system
    engine = self.engine(db)
    if engine:
        return engine.toString(value)
    # convert the value to a string using default values
    coltype = ColumnType.base(self.columnType())
    if coltype == ColumnType.Date:
        if extra is None:
            extra = '%Y-%m-%d'
        return value.strftime(extra)
    elif coltype == ColumnType.Time:
        # bugfix: '%h:%m:%s' emits month/minute/epoch on most platforms;
        # use hour/minute/second directives, matching valueFromString
        if extra is None:
            extra = '%H:%M:%S'
        return value.strftime(extra)
    elif coltype == ColumnType.Datetime:
        # bugfix: '%Y-%m-%d %h:%m:s' was malformed; matches valueFromString
        if extra is None:
            extra = '%Y-%m-%d %H:%M:%S'
        return value.strftime(extra)
    return nstr(value)
@staticmethod
def defaultDatabaseName(typ, name, reference=''):
    """
    Returns the default database name for a column by rendering the
    appropriate Column template.

    :param typ | <str>
           name | <str>
           reference | <str> | table the column refers to

    :return <str>
    """
    # make sure we have an actual name to process
    name = nstr(name).strip()
    if not name:
        return ''
    # reference fields get the dedicated reference template;
    # everything else maps through the default template table
    if typ == 'fieldName' and reference:
        templ = Column.TEMPLATE_REFERENCE
    else:
        templ = Column.TEMPLATE_MAP.get(typ)
    if templ is None:
        return ''
    return projex.text.render(templ, {'name': name, 'table': reference})
@staticmethod
def defaultPrimaryColumn(name):
    """
    Creates the default auto-incrementing integer primary-key column
    ('id') for a table schema with the given name.

    :return <Column>
    """
    # generate the auto column
    column = Column(ColumnType.Integer, 'id')
    column.setFieldName(Column.defaultDatabaseName('primaryKey', name))
    column.setPrimary(True)
    column.setAutoIncrement(True)
    column.setRequired(True)
    column.setUnique(True)
    return column
@staticmethod
def fromXml(xcolumn, referenced=False):
    """
    Generates a new column from the inputted xml column data.  Each
    property is read from a child element (orb >= 4.3 format) with a
    fallback to the legacy attribute format.

    :param xcolumn | <xml.etree.Element>
           referenced | <bool> | whether the column comes from an external file

    :return <Column> || None

    :raises orb.errors.InvalidColumnType for an unknown type name
    """
    try:
        typ = xcolumn.find('type').text
    except AttributeError:
        typ = xcolumn.get('type')
    try:
        typ = ColumnType[typ]
    except KeyError:
        raise orb.errors.InvalidColumnType(typ)
    name = xcolumn.get('name')
    if typ is None or not name:
        return None
    # create the column
    column = Column(typ, name, referenced=referenced)
    try:  # as of orb 4.3
        column.setGetterName(xcolumn.find('getter').text)
    except AttributeError:
        column.setGetterName(xcolumn.get('getter'))
    try:
        column.setSetterName(xcolumn.find('setter').text)
    except AttributeError:
        column.setSetterName(xcolumn.get('setter'))
    try:
        column.setFieldName(xcolumn.find('field').text)
    except AttributeError:
        column.setFieldName(xcolumn.get('field'))
    try:
        column.setDisplayName(xcolumn.find('display').text)
    except AttributeError:
        column.setDisplayName(xcolumn.get('display'))
    try:
        column.setShortcut(xcolumn.find('shortcut').text)
    except AttributeError:
        column.setShortcut(xcolumn.get('shortcut', ''))
    try:
        column.setDefault(xcolumn.find('default').text)
    except AttributeError:
        column.setDefault(xcolumn.get('default', None))
    try:
        # bugfix: was column.setDefault(...), which clobbered the default
        # value with the max length instead of restoring it
        column.setMaxlength(int(xcolumn.find('maxlen').text))
    except AttributeError:
        maxlen = xcolumn.get('maxlen')
        if maxlen is not None:
            column.setMaxlength(int(maxlen))
    # restore formatting options
    try:
        column._stringFormat = xcolumn.find('format').text
    except AttributeError:
        pass
    # restore the flag information
    flags = 0
    xflags = xcolumn.find('flags')
    if xflags is not None:
        for xchild in xflags:
            try:
                flags |= Column.Flags[projex.text.classname(xchild.tag)]
            except KeyError:
                log.error('{0} is not a valid column flag.'.format(xchild.tag))
    else:
        for flag in Column.Flags.keys():
            state = xcolumn.get(flag[0].lower() + flag[1:]) == 'True'
            if state:
                try:
                    flags |= Column.Flags[projex.text.classname(flag)]
                except KeyError:
                    log.error('{0} is not a valid column flag.'.format(flag))
    column.setFlags(flags)
    # restore the index information
    xindex = xcolumn.find('index')
    if xindex is not None:  # as of 4.3
        column.setIndexName(xindex.text)
        column.setIndexed(True)
        column.setIndexCached(xindex.get('cached') == 'True')
        # bugfix: toXml writes 'cached'/'timeout' on the index element,
        # so read the timeout from xindex (mirrors the reversed branch),
        # not from the column element
        column.setIndexCacheTimeout(int(xindex.get('timeout',
                                        xindex.get('expires', column.indexCacheTimeout()))))
    else:
        # restore legacy attribute-based indexing options
        column.setIndexName(xcolumn.get('index'))
        column.setIndexed(xcolumn.get('indexed') == 'True')
        column.setIndexCached(xcolumn.get('indexCached') == 'True')
        column.setIndexCacheTimeout(int(xcolumn.get('indexCachedExpires',
                                                    column.indexCacheTimeout())))
    # create relation information
    xrelation = xcolumn.find('relation')
    if xrelation is not None:
        try:
            column.setReference(xrelation.find('table').text)
        except AttributeError:
            column.setReference(xrelation.get('table', ''))
        try:
            action = int(xrelation.find('removedAction').text)
        except StandardError:
            action = int(xrelation.get('removedAction', 1))
        column.setReferenceRemovedAction(action)
        xreversed = xrelation.find('reversed')
        if xreversed is not None:
            column.setReversed(True)
            column.setReversedName(xreversed.text)
            column.setReversedCached(xreversed.get('cached') == 'True')
            column.setReversedCacheTimeout(int(xreversed.get('timeout',
                                               xreversed.get('expires',
                                                             column.reversedCacheTimeout()))))
        else:
            column.setReversed(xrelation.get('reversed') == 'True')
            column.setReversedName(xrelation.get('reversedName'))
            column.setReversedCached(xrelation.get('cached') == 'True')
            column.setReversedCacheTimeout(int(xrelation.get('expires', column.reversedCacheTimeout())))
    return column
"""
Defines methods for aggregation within the database system.
"""
from projex.lazymodule import lazy_import
orb = lazy_import('orb')
class ColumnAggregator(object):
    """
    Describes an aggregated "column": instead of mapping to a physical
    backend column, instances of this class produce an <orb.QueryAggregate>
    on demand for a schema column.
    """
    def __init__(self,
                 type,
                 reference,
                 referenceColumn=None,
                 targetColumn=None,
                 where=None):
        """
        Defines a new column aggregation mechanism.  This class allows
        defining a column on a schema that generates an <orb.QueryAggregate>
        rather than an actual column in the backend database.
        :param      type            | <orb.QueryAggregate.Type>
                    reference       | <str>
                    referenceColumn | <str>
                    targetColumn    | <str>
                    where           | <orb.Query> or None
        """
        self._aggregateType = type
        self._reference = reference
        self._referenceColumn = referenceColumn
        self._targetColumn = targetColumn
        self._where = where

    def _resolveColumn(self, columnName):
        # shared lookup used by referenceColumn()/targetColumn(): resolves a
        # column on the referenced schema, None when there is nothing to
        # resolve, and raises when the referenced schema is unknown
        if not (self._reference and columnName):
            return None
        schema = orb.system.schema(self._reference)
        if not schema:
            raise orb.errors.TableNotFound(self._reference)
        return schema.column(columnName)

    def aggregateType(self):
        """
        Returns the aggregation type for this instance.
        :return     <orb.QueryAggregate.Type>
        """
        return self._aggregateType

    def generate(self, column):
        """
        Generates a new <orb.QueryAggregate> for the inputted <orb.Column>.
        :param      column | <orb.Column>
        :return     <orb.QueryAggregate>
        """
        return orb.QueryAggregate(self.aggregateType(),
                                  self.reference(),
                                  column=self.referenceColumn(),
                                  where=self.query(column))

    def query(self, column):
        """
        Generates an <orb.Query> instance to use for the where clause
        for an <orb.QueryAggregate>.
        :return     <orb.Query>
        """
        q = orb.Query()
        join_col = self.referenceColumn()
        if join_col:
            # tie the referenced column back to the source model
            q &= orb.Query(join_col) == orb.Query(column.schema().model())
        return q & self.where(column)

    def reference(self):
        """
        Returns the reference table that is associated with this aggregation.
        :return     <orb.Table> || None
        """
        model = orb.system.model(self._reference)
        if not model:
            raise orb.errors.TableNotFound(self._reference)
        return model

    def referenceColumn(self):
        """
        Returns the reference column that is associated with this aggregation.
        :return     <orb.Column> || None
        """
        return self._resolveColumn(self._referenceColumn)

    def targetColumn(self):
        """
        Returns the target column that is used as the target for this
        aggregation.
        :return     <orb.Column> || None
        """
        return self._resolveColumn(self._targetColumn)

    def where(self, column):
        """
        Returns an <orb.Query> for the inputted column.
        :param      column | <orb.Column>
        :return     <orb.Query> || None
        """
        return self._where(column) if callable(self._where) else self._where
"""
Defines methods for joining columns within the database system.
"""
from projex.lazymodule import lazy_import
orb = lazy_import('orb')
class ColumnJoiner(object):
    """
    Describes how a schema column joins against a column on another
    (referenced) model rather than holding data directly.
    """
    def __init__(self,
                 reference,
                 referenceColumn=None,
                 targetColumn=None,
                 where=None):
        # `reference` names the joined model; `where` may be a static
        # <orb.Query> or a callable taking the source <orb.Column>
        self._reference = reference
        self._referenceColumn = referenceColumn
        self._targetColumn = targetColumn
        self._where = where

    def _referenceSchema(self):
        # resolves the referenced schema, raising when it is unknown
        schema = orb.system.schema(self._reference)
        if not schema:
            raise orb.errors.TableNotFound(self._reference)
        return schema

    def query(self, column):
        """
        Generates the where query for this joiner for the given column.
        :param      column | <orb.Column>
        :return     <orb.Query>
        """
        base = self.where(column)
        if base is None:
            base = orb.Query()
        base &= orb.Query(self.referenceColumn()) == orb.Query(column.schema().model())
        return base

    def reference(self):
        """
        Returns the reference model associated with this joiner.
        :return     subclass of <orb.Table>
        """
        model = orb.system.model(self._reference)
        if not model:
            raise orb.errors.TableNotFound(self._reference)
        return model

    def referenceColumn(self):
        """
        Returns the reference column associated with this joiner.
        :return     <orb.Column>
        """
        return self._referenceSchema().column(self._referenceColumn)

    def targetColumn(self):
        """
        Returns the target column for the inputted joiner.
        :return     <orb.Column>
        """
        return self._referenceSchema().column(self._targetColumn)

    def where(self, column):
        """
        Generates the join logic for the inputted column.
        :param      column | <orb.Column>
        :return     <orb.Query>
        """
        return self._where(column) if callable(self._where) else self._where
"""
This module is a container for dynamically generated tables from ORB
files. There will never be any actual classes defined within this
module code, but if you generate a table and look for its module, it will
be `orb.schema.dynamic`. In this way, you can know that a class was generated
from an ORB file.
"""
""" Defines an indexing system to use when looking up records. """
import logging
import projex.text
from xml.etree import ElementTree
from projex.lazymodule import lazy_import
from projex.text import nativestring as nstr
log = logging.getLogger(__name__)
orb = lazy_import('orb')
errors = lazy_import('orb.errors')
class Index(object):
    """
    Defines an indexed way to lookup information from a database.
    Creating an Index generates an object that works like a method, however
    has a preset query built into it, along with caching options.
    """
    def __init__(self,
                 name='',
                 columns=None,
                 unique=False,
                 order=None,
                 cached=False,
                 referenced=False):
        if columns is None:
            columns = []
        self.__name__ = name
        self._schema = None            # owning schema, set via setSchema()
        self._columnNames = columns    # columns queried, in call-argument order
        self._unique = unique          # True => lookup returns a single record
        self._order = order            # optional [(field, direction), ..] sort
        self._local_cache = {}         # cached results keyed by (values, lookup, db)
        self._cache = {}               # per-table backend cache objects (see cache())
        self._cached = cached
        self._cacheTimeout = 0
        self._referenced = referenced

    def __call__(self, table, *values, **options):
        """
        Executes the indexed lookup against the given table, expecting one
        positional value per column name registered on this index.
        :param      table  | subclass of <orb.Table>
                    values | one value per column in columnNames()
        :return     <orb.Table> || None when unique, otherwise <orb.RecordSet>
        """
        # make sure we have the right number of arguments
        if len(values) != len(self._columnNames):
            name = self.__name__
            columnCount = len(self._columnNames)
            valueCount = len(values)
            opts = (name, columnCount, valueCount)
            text = '%s() takes exactly %i arguments (%i given)' % opts
            raise TypeError(text)

        # cache key combines the input values, the lookup options and the
        # target database name so different contexts do not collide
        data = tuple(hash(value) for value in values)
        cache_key = (data,
                     hash(orb.LookupOptions(**options)),
                     options.get('db').name() if options.get('db') else '')

        cache = self.cache(table)
        # NOTE(review): only membership in the local dict is checked here;
        # the backend cache entry written below via cache.setValue() is never
        # consulted, so its timeout does not expire _local_cache — confirm
        # whether an isCached() check was intended.
        if cache and cache_key in self._local_cache:
            return self._local_cache[cache_key]

        self._local_cache.pop(cache_key, None)

        # create the lookup query
        query = orb.Query()
        for i, col in enumerate(self._columnNames):
            value = values[i]
            column = table.schema().column(col)

            # a non-committed record can never match: short-circuit with the
            # appropriate empty result
            if (orb.Table.recordcheck(value) or orb.View.recordcheck(value)) and not value.isRecord():
                if self._unique:
                    return None
                return orb.RecordSet()

            if not column:
                name = table.schema().name()
                raise errors.ColumnNotFound(name, col)

            # encrypt the comparison value so it matches the stored form
            if column.isEncrypted():
                value = orb.system.encrypt(value)

            query &= orb.Query(col) == value

        # include additional where option selection
        if 'where' in options:
            options['where'] = query & options['where']
        else:
            options['where'] = query

        # selects the records from the database
        options['context'] = table.schema().context(self.name())
        if self._unique:
            results = table.selectFirst(**options)
        else:
            results = table.select(**options)

        # cache the results
        if cache and results is not None:
            self._local_cache[cache_key] = results
            cache.setValue(cache_key, True, timeout=self._cacheTimeout)

        return results

    def cache(self, table):
        """
        Returns the cache associated with this index for the given table.
        :return     <orb.TableCache> || None
        """
        if not self.cached():
            return None
        if table not in self._cache:
            self._cache[table] = table.tableCache()
        return self._cache[table]

    def cached(self):
        """
        Returns whether or not the results for this index should be cached.
        :return     <bool>
        """
        return self._cached

    def cacheTimeout(self):
        """
        Returns the number of seconds that this index will keep its cache
        before reloading.
        :return     <int> | seconds
        """
        return self._cacheTimeout

    def columnNames(self):
        """
        Returns the list of column names that this index will be expecting as
        inputs when it is called.
        :return     [<str>, ..]
        """
        return self._columnNames

    def isReferenced(self):
        """
        Returns whether or not this index was loaded from a referenced
        (external) schema definition — see fromXml(referenced=...).
        :return     <bool>
        """
        return self._referenced

    def name(self):
        """
        Returns the name of this index.
        :return     <str>
        """
        return self.__name__

    def schema(self):
        # returns the <orb.TableSchema> this index is attached to, or None
        return self._schema

    def setCached(self, state):
        """
        Sets whether or not this index should cache the results of its query.
        :param      state | <bool>
        """
        self._cached = state

    def setCacheTimeout(self, seconds):
        """
        Sets the time in seconds that this index will hold onto a client
        side cache.  If the value is 0, then the cache will never expire,
        otherwise it will update after N seconds.
        :param      seconds | <int>
        """
        self._cacheTimeout = seconds

    def setColumnNames(self, columnNames):
        """
        Sets the list of the column names that this index will use when
        looking up the records.
        :param      columnNames | [<str>, ..]
        """
        self._columnNames = columnNames

    def setOrder(self, order):
        """
        Sets the order information for this index for how to sort and
        organize the looked up data.
        :param      order | [(<str> field, <str> direction), ..]
        """
        self._order = order

    def setName(self, name):
        """
        Sets the name for this index to this index.
        :param      name | <str>
        """
        self.__name__ = nstr(name)

    def setUnique(self, state):
        """
        Sets whether or not this index should find only a unique record.
        :param      state | <bool>
        """
        self._unique = state

    def setSchema(self, schema):
        # associates this index with its owning <orb.TableSchema>
        self._schema = schema

    def unique(self):
        """
        Returns whether or not the results that this index expects should be
        a unique record, or multiple records.
        :return     <bool>
        """
        return self._unique

    def toolTip(self, context='index'):
        # builds an HTML usage hint for documentation/IDE display; the
        # template differs for unique (single record) vs multi-record indexes
        if self.unique():
            tip = '''\
<b>{schema}.{name} <small>({schema} || None)</small></b>
<pre>
>>> # lookup record by index
>>> {schema}.{getter}({columns})
&lt;{schema}&gt;
</pre>
'''
        else:
            tip = '''\
<b>{schema}.{name} <small>(RecordSet([{schema}, ..]))</small></b>
<pre>
>>> # lookup records by index
>>> {schema}.{getter}({columns})
&lt;orb.RecordSet([&lt;{schema}&gt;, ..])&gt;
</pre>
'''
        return tip.format(name=self.name(),
                          getter=self.name(),
                          schema=self.schema().name(),
                          record=projex.text.underscore(self.schema().name()),
                          columns=', '.join([projex.text.underscore(c) for c in self.columnNames()]))

    def toXml(self, xparent):
        """
        Saves the index data for this column to XML.
        :param      xparent | <xml.etree.ElementTree.Element>
        :return     <xml.etree.ElementTree.Element>
        """
        xindex = ElementTree.SubElement(xparent, 'index')
        xindex.set('name', self.name())
        if self.unique():
            xindex.set('unique', 'True')
        if self.cached():
            # only persisted when caching is enabled; fromXml() compares the
            # 'cached' attribute against the string 'True'
            xindex.set('cached', nstr(self.cached()))
            xindex.set('cacheTimeout', nstr(self._cacheTimeout))
        for name in self.columnNames():
            ElementTree.SubElement(xindex, 'column').text = name
        return xindex

    def validate(self, record, values):
        """
        Validates whether or not this index's requirements are satisfied by
        the inputted record and values.  If this index fails validation, a
        ValidationError will be raised.
        :param      record | subclass of <orb.Table>
                    values | {<orb.Column>: <variant>, ..}
        :return     <bool>
        """
        schema = record.schema()
        try:
            column_values = [values[schema.column(name)] for name in self.columnNames()]
        except StandardError:
            msg = 'Missing some columns ({0}) from {1}.{2}.'.format(', '.join(self.columnNames()),
                                                                    record.schema().name(),
                                                                    self.name())
            raise errors.IndexValidationError(self, msg=msg)

        # ensure a unique record is preserved
        if self.unique():
            lookup = getattr(record, self.name())
            other = lookup(*column_values)
            if other and other != record:
                msg = 'A record already exists with the same {0} combination.'.format(', '.join(self.columnNames()))
                raise errors.IndexValidationError(self, msg=msg)

        return True

    @staticmethod
    def fromXml(xindex, referenced=False):
        """
        Generates an index method descriptor from xml data.
        :param      xindex | <xml.etree.Element>
        :return     <Index> || None
        """
        index = Index(referenced=referenced)
        index.setName(xindex.get('name', ''))
        index.setUnique(xindex.get('unique') == 'True')
        index.setCached(xindex.get('cached') == 'True')
        # 'cachedExpires' is the legacy attribute name for 'cacheTimeout'
        index.setCacheTimeout(int(xindex.get('cacheTimeout',
                                             xindex.get('cachedExpires', index._cacheTimeout))))
        xcolumns = xindex.findall('column')
        if xcolumns:
            columns = [xcolumn.text for xcolumn in xcolumns]
        else:
            # legacy format: comma-separated 'columns' attribute
            columns = xindex.get('columns', '').split(',')
        index.setColumnNames(columns)
        return index
"""
Defines the main Table class that will be used when developing
database classes.
"""
# ------------------------------------------------------------------------------
import orb
from orb import errors
from new import instancemethod
# Reverse-lookup methods generated before their reference model exists are
# parked here, keyed by reference table name, until MetaTable creates the
# model and attaches them.
TEMP_REVERSE_LOOKUPS = {}

# Docstring template for auto-generated column getter methods.
GETTER_DOCS = """\
Gets the value for the {name} column.
{optdocs}
This method was auto-generated from ORB.
:param {optparams}
:return {returns}
"""

# Extra getter doc section used when the column is translatable.
GETTER_I18N_DOCS = """\
:internationalization You can provide the optional locale parameter to get
the locale-specific value for this column. If no
locale is provided, the current global locale
defined in the [[Orb]] instance will be used.
"""

# Extra getter doc section used when the column is a foreign-key reference.
GETTER_FKEY_DOCS = """\
:references Foreign key references will be inflated to their
API class type. If you want simply the key value
from the database instead of a record, then you
can pass inflated = False. This
can improve overhead when working with large amounts
of records at one time.
"""

# ----------------------------------------------------------------------

# Docstring template for auto-generated column setter methods.
SETTER_DOCS = """\
Sets the value for the {name} column to the inputted value. This will only
affect the value of the API object instance, and '''not''' the value in
the database. To commit this value to the database, use the [[Table::commit]]
method. This method will return a boolean whether or not a change in the
value actually occurred.
:param {param} | {returns}
{optparams}
:return <bool> | changed
"""

# Extra setter doc section used when the column is translatable.
SETTER_I18N_DOCS = """\
:internationalization If this column is translatable, you can provide the
optional locale parameter to set the locale-specific
value for this column. If no locale is provided, the
current global locale defined in the [[Orb]] instance
will be used.
"""
# ------------------------------------------------------------------------------
class gettermethod(object):
    """ Creates a method for tables to use as a field accessor. """
    def __init__(self, **kwds):
        """
        Builds the accessor descriptor used when reading a column's value
        from a database record.  Only the MetaTable class should construct
        these while generating column methods on a model.
        """
        self.__dict__.update(kwds)
        self.columnName = kwds.get('columnName', '')

        arg_list = []
        doc_parts = []
        param_parts = []

        # translatable columns accept an optional locale override
        if kwds.get('translatable', False):
            arg_list.append('locale=None')
            doc_parts.append(GETTER_I18N_DOCS)
            param_parts.append(' locale | None || <str> locale code')

        # foreign-key columns accept an inflation toggle
        if kwds.get('inflatable', False):
            arg_list.append('inflated=True')
            doc_parts.append(GETTER_FKEY_DOCS)
            param_parts.append(' inflated | <bool>')

        returns = kwds.get('returns',
                           '<variant> (see {0} for details)'.format(self.columnName))

        self.func_name = kwds['__name__']
        self.func_args = '({0})'.format(', '.join(arg_list))
        self.func_doc = GETTER_DOCS.format(name=self.columnName,
                                           optdocs='\n'.join(doc_parts),
                                           optparams='\n'.join(param_parts),
                                           returns=returns)
        self.__dict__['__doc__'] = self.func_doc

    def __call__(self, record, **options):
        """
        Looks up this column's value on the given database record.
        :param      record <Table>
        """
        fallback = options.pop('default', None)
        options['context'] = record.schema().context(self.func_name)
        ctx = record.contextOptions(**options)
        value = record.recordValue(self.columnName,
                                   locale=ctx.locale,
                                   default=fallback,
                                   inflated=ctx.inflated,
                                   useMethod=False)
        # collapse an inflated record back to its key when not requested
        if not ctx.inflated and orb.Table.recordcheck(value):
            return value.primaryKey()
        return value
#------------------------------------------------------------------------------
class settermethod(object):
    """ Defines a method for setting database fields on a Table instance. """
    def __init__(self, **kwds):
        """
        Builds the mutator descriptor used when assigning a column's value
        on a database record.  Only the MetaTable class should construct
        these while generating column methods on a model.
        """
        self.__dict__.update(kwds)
        self.columnName = kwds.get('columnName', '')

        # foreign-key setters accept either a record or a raw key
        arg_list = ['record_or_key'] if kwds.get('inflatable') else ['value']
        doc_parts = []
        param_parts = []

        # translatable columns accept an optional locale override
        if kwds.get('translatable'):
            arg_list.append('locale=None')
            doc_parts.append(SETTER_I18N_DOCS)
            param_parts.append(' locale | None || <str>')

        returns = kwds.get('returns',
                           '<variant> (see {0} for details)'.format(self.columnName))

        self.func_name = kwds['__name__']
        self.func_args = '({0})'.format(', '.join(arg_list))
        self.func_doc = SETTER_DOCS.format(name=self.columnName,
                                           param=arg_list[0],
                                           optdocs='\n'.join(doc_parts),
                                           optparams='\n'.join(param_parts),
                                           returns=returns)
        self.__dict__['__doc__'] = self.func_doc

    def __call__(self, record, value, **kwds):
        """
        Assigns the value for this column on the inputted database record.
        :param      record <Table>
                    value  <variant>
        """
        kwds['useMethod'] = False
        return record.setRecordValue(self.columnName, value, **kwds)
#----------------------------------------------------------------------
class reverselookupmethod(object):
    """ Defines a reverse lookup method for looking up relations. """
    def __init__(self, **kwds):
        """
        Defines the getter method that will be used when accessing
        information about a column on a database record.  This class should
        only be used by the MetaTable class when generating column methods
        on a model.
        """
        self.__dict__.update(kwds)
        self.__lookup__ = True

        self._cache = {}                                    # per-table cache objects (see cache())
        self.reference = kwds.get('reference', '')          # name of the model that owns the column
        self.referenceDb = kwds.get('referenceDatabase', None)
        self.columnName = kwds.get('columnName', '')
        self.unique = kwds.get('unique', False)             # True => return a single record
        self.cached = kwds.get('cached', False)
        self.cacheTimeout = kwds.get('cacheTimeout', 0)
        self.func_name = kwds['__name__']
        self.func_args = '()'
        self.func_doc = 'Auto-generated Orb reverse lookup method'
        self.__dict__['__doc__'] = self.func_doc

    def __call__(self, record, **options):
        """
        Performs the reverse lookup for the inputted database record.
        :param      record <Table>
        :return     <orb.Table> || None when unique, otherwise <orb.RecordSet>
        """
        # renamed from `reload` to avoid shadowing the Python 2 builtin
        force_reload = options.pop('reload', False)

        # remove any invalid query lookups
        if 'where' in options and orb.Query.testNull(options['where']):
            options.pop('where')

        # lookup the records with a specific model
        options.setdefault('locale', record.recordLocale())
        table = options.get('table') or self.tableFor(record)
        if not table:
            return None if self.unique else orb.RecordSet()

        # return from the cache when specified
        cache = self.cache(table)
        cache_key = (record.id(),
                     hash(orb.LookupOptions(**options)),
                     record.database().name())

        # BUGFIX: this previously read '_Table__preload_cache', which never
        # matched the '_Model__preload_cache' attribute written below and in
        # preload(), so preloaded results were never found.
        preload_cache = getattr(record, '_Model__preload_cache', {})
        if not force_reload and self.func_name in preload_cache:
            out = preload_cache[self.func_name]
            out.updateOptions(**options)
            return out

        preload_cache.pop(self.func_name, None)

        # make sure this is a valid record
        if not record.isRecord():
            if self.unique:
                return None
            return orb.RecordSet()

        # generate the reverse lookup query
        reverse_q = orb.Query(self.columnName) == record

        options['where'] = reverse_q & options.get('where')
        options['db'] = record.database()
        options['context'] = record.schema().context(self.func_name)

        lookup = orb.LookupOptions(**options)
        context = record.contextOptions(**options)
        if self.unique:
            output = table.selectFirst(lookup=lookup, options=context)
        else:
            output = table.select(lookup=lookup, options=context)

        # track where the records came from so edits propagate back
        if isinstance(output, orb.RecordSet):
            output.setSource(record)
            output.setSourceColumn(self.columnName)

        if cache and output is not None:
            preload_cache[self.func_name] = output
            setattr(record, '_Model__preload_cache', preload_cache)
            cache.setValue(cache_key, True, timeout=self.cacheTimeout)

        return output

    def cache(self, table, force=False):
        """
        Returns the cache for this table.
        :param      table | <subclass of orb.Table>
        :return     <orb.TableCache> || None
        """
        try:
            return self._cache[table]
        except KeyError:
            if force or self.cached:
                cache = table.tableCache() or orb.TableCache(table, table.schema().cache(), timeout=self.cacheTimeout)
                self._cache[table] = cache
                return cache
            return None

    def preload(self, record, data, options, type='records'):
        """
        Preloads lookup results from raw backend data, bypassing a query.
        :param      record  | <orb.Table>
                    data    | <dict> for 'first'/'last', <variant> for
                              'ids'/'count', or [<dict>, ..] for 'records'
                    options | <orb.ContextOptions> || None
                    type    | <str> RecordSet cache bucket to fill
        """
        table = self.tableFor(record)
        preload_cache = getattr(record, '_Model__preload_cache', {})
        rset = preload_cache.get(self.func_name)
        if rset is None:
            rset = orb.RecordSet()
            preload_cache[self.func_name] = rset
            setattr(record, '_Model__preload_cache', preload_cache)

        if type == 'ids':
            rset.cache('ids', data)
        elif type == 'count':
            rset.cache('count', data)
        elif type == 'first':
            rset.cache('first', table(__values=data, options=options) if data else None)
        elif type == 'last':
            rset.cache('last', table(__values=data, options=options) if data else None)
        else:
            # renamed loop variable from `record` to avoid shadowing the param
            rset.cache('records', [table(__values=row, options=options) for row in data or []])

    def tableFor(self, record):
        """
        Returns the table for the inputted record.
        :return     <orb.Table>
        """
        return record.polymorphicModel(self.reference) or orb.system.model(self.reference, database=self.referenceDb)
# -----------------------------------------------------------------------------
class MetaTable(type):
    """
    Defines the table Meta class that will be used to dynamically generate
    Table class types.
    """
    def __new__(mcs, name, bases, attrs):
        """
        Manages the creation of database model classes, reading
        through the creation attributes and generating table
        schemas based on the inputted information.  This class
        never needs to be expressly defined, as any class that
        inherits from the Table class will be passed through this
        as a constructor.
        :param      mcs     <MetaTable>
        :param      name    <str>
        :param      bases   <tuple> (<object> base,)
        :param      attrs   <dict> properties
        :return     <type>
        """
        # ignore initial class
        db_ignore = attrs.pop('__db_ignore__', False)
        if db_ignore:
            return super(MetaTable, mcs).__new__(mcs, name, bases, attrs)

        # collect '__db_*' settings inherited from MetaTable bases
        base_tables = [base for base in bases if isinstance(base, MetaTable)]
        base_data = {key: value for base_table in base_tables
                     for key, value in base_table.__dict__.items() if key.startswith('__db_')}

        # define the default database information
        db_data = {
            '__db__': None,
            '__db_group__': 'Default',
            '__db_name__': name,
            '__db_dbname__': '',
            '__db_columns__': [],
            '__db_indexes__': [],
            '__db_pipes__': [],
            '__db_contexts__': {},
            '__db_views__': {},
            '__db_schema__': None,
            '__db_implements__': None,
            '__db_inherits__': None,
            '__db_abstract__': False,
            '__db_archived__': False
        }

        # override with any inherited data
        db_data.update(base_data)

        # override with any custom data
        db_data.update({key: value for key, value in attrs.items() if key.startswith('__db_')})

        # default inheritance to the first base table's schema when not given
        if not db_data['__db_inherits__'] and base_tables and base_tables[0].schema():
            db_data['__db_inherits__'] = base_tables[0].schema().name()

        # create a new model for this table
        return mcs.createModel(mcs, name, bases, attrs, db_data)

    @staticmethod
    def createModel(mcs, name, bases, attrs, db_data):
        """
        Create a new table model: builds (or extends) the <orb.TableSchema>
        and attaches auto-generated getter/setter/index/pipe/reverse-lookup
        methods to the new class.
        """
        # implement a new override class
        if db_data['__db_implements__']:
            new_base = orb.system.model(db_data['__db_implements__'], autoGenerate=True)
            bases = tuple([new_base if issubclass(base, orb.Table) else base for base in bases])
            schema = new_base.schema()
        else:
            schema = db_data['__db_schema__']

        new_model = super(MetaTable, mcs).__new__(mcs, name, bases, attrs)

        if schema:
            # extend an existing schema with this class's declarations
            db_data['__db_name__'] = schema.name()
            db_data['__db_dbname__'] = schema.dbname()

            try:
                schema.setAutoLocalize(db_data['__db_autolocalize__'])
            except KeyError:
                pass

            try:
                schema.setAutoPrimary(db_data['__db_autoprimary__'])
            except KeyError:
                pass

            if db_data['__db_columns__']:
                columns = schema.columns(recurse=False) + db_data['__db_columns__']
                schema.setColumns(columns)
            if db_data['__db_indexes__']:
                indexes = schema.indexes() + db_data['__db_indexes__']
                schema.setIndexes(indexes)
            if db_data['__db_pipes__']:
                pipes = schema.pipes() + db_data['__db_pipes__']
                schema.setPipes(pipes)
            if db_data['__db_contexts__']:
                contexts = dict(schema.contexts())
                contexts.update(db_data['__db_contexts__'])
                schema.setContexts(contexts)
            if db_data['__db_views__']:
                # NOTE(review): `name` here shadows the createModel parameter
                for name, view in db_data['__db_views__'].items():
                    schema.setView(name, view)
        else:
            # create the table schema
            schema = orb.TableSchema()
            schema.setDatabase(db_data['__db__'])
            schema.setAutoPrimary(db_data.get('__db_autoprimary__', True))
            schema.setAutoLocalize(db_data.get('__db_autolocalize__', False))
            schema.setName(db_data['__db_name__'] or name)
            schema.setGroupName(db_data['__db_group__'])
            schema.setDbName(db_data['__db_dbname__'])
            schema.setAbstract(db_data['__db_abstract__'])
            schema.setColumns(db_data['__db_columns__'])
            schema.setIndexes(db_data['__db_indexes__'])
            schema.setPipes(db_data['__db_pipes__'])
            schema.setInherits(db_data['__db_inherits__'])
            schema.setArchived(db_data['__db_archived__'])
            schema.setContexts(db_data['__db_contexts__'])

            # NOTE(review): `name` here shadows the createModel parameter
            for name, view in db_data['__db_views__'].items():
                schema.setView(name, view)

            schema.setModel(new_model)
            orb.system.registerSchema(schema)

            db_data['__db_schema__'] = schema

        # add the db values to the class
        for key, value in db_data.items():
            setattr(new_model, key, value)

        # create class methods for the index instances
        for index in schema.indexes():
            iname = index.name()
            if not hasattr(new_model, iname):
                setattr(new_model, index.name(), classmethod(index))

        # create instance methods for the pipe instances
        for pipe in schema.pipes():
            pname = pipe.name()
            if not hasattr(new_model, pname):
                pipemethod = instancemethod(pipe, None, new_model)
                setattr(new_model, pname, pipemethod)

        # pylint: disable-msg=W0212
        columns = schema.columns(recurse=False)
        for column in columns:
            colname = column.name()

            # create getter method
            gname = column.getterName()
            if gname and not hasattr(new_model, gname):
                gmethod = gettermethod(columnName=colname,
                                       translatable=column.isTranslatable(),
                                       inflatable=column.isReference(),
                                       returns=column.returnType(),
                                       __name__=gname)
                getter = instancemethod(gmethod, None, new_model)
                setattr(new_model, gname, getter)

            # create setter method (skipped for read-only columns)
            sname = column.setterName()
            if sname and not (column.isReadOnly() or hasattr(new_model, sname)):
                smethod = settermethod(columnName=colname,
                                       translatable=column.isTranslatable(),
                                       inflatable=column.isReference(),
                                       returns=column.returnType(),
                                       __name__=sname)
                setter = instancemethod(smethod, None, new_model)
                setattr(new_model, sname, setter)

            # create an index if necessary
            iname = column.indexName()
            if column.indexed() and iname and not hasattr(new_model, iname):
                index = orb.Index(iname,
                                  [column.name()],
                                  unique=column.unique())
                index.setCached(column.indexCached())
                index.setCacheTimeout(column.indexCacheTimeout())
                index.__name__ = iname
                imethod = classmethod(index)
                setattr(new_model, iname, imethod)

            # create a reverse lookup
            if column.isReversed() and column.schema().name() == db_data['__db_name__']:
                rev_name = column.reversedName()
                rev_cached = column.reversedCached()
                ref_name = column.reference()
                try:
                    ref_model = column.referenceModel()
                except errors.TableNotFound:
                    ref_model = None
                rev_cacheTimeout = column.reversedCacheTimeout()

                # create the lookup method
                lookup = reverselookupmethod(columnName=column.name(),
                                             reference=db_data['__db_name__'],
                                             unique=column.unique(),
                                             cached=rev_cached,
                                             cacheTimeout=rev_cacheTimeout,
                                             __name__=rev_name)

                # NOTE(review): this walks ref_model toward its base while the
                # direct base IS orb.Table, which ends with ref_model set to
                # orb.Table itself — confirm whether '!=' was intended so the
                # lookup lands on the concrete model instead.
                while ref_model and ref_model.__module__ != 'orb.schema.dynamic' and \
                        ref_model.__bases__ and ref_model.__bases__[0] == orb.Table:
                    ref_model = ref_model.__bases__[0]

                # assign to an existing model
                # ensure we're assigning it to the proper base module
                if ref_model:
                    ilookup = instancemethod(lookup, None, ref_model)
                    setattr(ref_model, rev_name, ilookup)
                else:
                    # reference model not created yet: park the lookup until
                    # its MetaTable pass runs (see TEMP_REVERSE_LOOKUPS below)
                    TEMP_REVERSE_LOOKUPS.setdefault(ref_name, [])
                    TEMP_REVERSE_LOOKUPS[ref_name].append((rev_name, lookup))

        # assign any cached reverse lookups to this model
        lookups = TEMP_REVERSE_LOOKUPS.pop(db_data['__db_name__'], [])
        for rev_name, lookup in lookups:
            ilookup = instancemethod(lookup, None, new_model)
            setattr(new_model, rev_name, ilookup)

        if db_data['__db_implements__']:
            orb.system.setModel(db_data['__db_implements__'], new_model)

        return new_model
"""
Defines the main View class that will be used when developing
database classes.
"""
# ------------------------------------------------------------------------------
import orb
from new import instancemethod
# Reverse-lookup methods generated before their reference model exists are
# parked here, keyed by reference table name, until the metaclass creates
# the model and attaches them.
TEMP_REVERSE_LOOKUPS = {}

# Docstring template for auto-generated column getter methods.
GETTER_DOCS = """\
Gets the value for the {name} column.
{optdocs}
This method was auto-generated from ORB.
:param {optparams}
:return {returns}
"""

# Extra getter doc section used when the column is translatable.
GETTER_I18N_DOCS = """\
:internationalization You can provide the optional locale parameter to get
the locale-specific value for this column. If no
locale is provided, the current global locale
defined in the [[Orb]] instance will be used.
"""

# Extra getter doc section used when the column is a foreign-key reference.
GETTER_FKEY_DOCS = """\
:references Foreign key references will be inflated to their
API class type. If you want simply the key value
from the database instead of a record, then you
can pass inflated = False. This
can improve overhead when working with large amounts
of records at one time.
"""

#----------------------------------------------------------------------

# Docstring template for auto-generated column setter methods.
SETTER_DOCS = """\
Sets the value for the {name} column to the inputted value. This will only
affect the value of the API object instance, and '''not''' the value in
the database. To commit this value to the database, use the [[View::commit]]
method. This method will return a boolean whether or not a change in the
value actually occurred.
:param {param} | {returns}
{optparams}
:return <bool> | changed
"""

# Extra setter doc section used when the column is translatable.
SETTER_I18N_DOCS = """\
:internationalization If this column is translatable, you can provide the
optional locale parameter to set the locale-specific
value for this column. If no locale is provided, the
current global locale defined in the [[Orb]] instance
will be used.
"""
# ------------------------------------------------------------------------------
class gettermethod(object):
    """ Creates a method for views to use as a field accessor. """
    def __init__(self, **kwds):
        """
        Builds the accessor descriptor used when reading a column's value
        from a database record.  Only the MetaView class should construct
        these while generating column methods on a model.
        """
        self.__dict__.update(kwds)
        self.columnName = kwds.get('columnName', '')

        arg_list = []
        doc_parts = []
        param_parts = []

        # translatable columns accept an optional locale override
        if kwds.get('translatable', False):
            arg_list.append('locale=None')
            doc_parts.append(GETTER_I18N_DOCS)
            param_parts.append(' locale | None || <str> locale code')

        # foreign-key columns accept an inflation toggle
        if kwds.get('inflatable', False):
            arg_list.append('inflated=True')
            doc_parts.append(GETTER_FKEY_DOCS)
            param_parts.append(' inflated | <bool>')

        returns = kwds.get('returns',
                           '<variant> (see {0} for details)'.format(self.columnName))

        self.func_name = kwds['__name__']
        self.func_args = '({0})'.format(', '.join(arg_list))
        self.func_doc = GETTER_DOCS.format(name=self.columnName,
                                           optdocs='\n'.join(doc_parts),
                                           optparams='\n'.join(param_parts),
                                           returns=returns)
        self.__dict__['__doc__'] = self.func_doc

    def __call__(self, record, **options):
        """
        Looks up this column's value on the given database record.
        :param      record <View>
        """
        fallback = options.pop('default', None)
        options['context'] = record.schema().context(self.func_name)
        ctx = record.contextOptions(**options)
        value = record.recordValue(self.columnName,
                                   locale=ctx.locale,
                                   default=fallback,
                                   inflated=ctx.inflated,
                                   useMethod=False)
        # collapse an inflated record back to its key when not requested
        if not ctx.inflated and orb.Table.recordcheck(value):
            return value.primaryKey()
        return value
#------------------------------------------------------------------------------
class settermethod(object):
    """ Defines a method for setting database fields on a View instance. """
    def __init__(self, **kwds):
        """
        Builds the mutator descriptor used when assigning a column's value
        on a database record.  Only the MetaView class should construct
        these while generating column methods on a model.
        """
        self.__dict__.update(kwds)
        self.columnName = kwds.get('columnName', '')

        # foreign-key setters accept either a record or a raw key
        arg_list = ['record_or_key'] if kwds.get('inflatable') else ['value']
        doc_parts = []
        param_parts = []

        # translatable columns accept an optional locale override
        if kwds.get('translatable'):
            arg_list.append('locale=None')
            doc_parts.append(SETTER_I18N_DOCS)
            param_parts.append(' locale | None || <str>')

        returns = kwds.get('returns',
                           '<variant> (see {0} for details)'.format(self.columnName))

        self.func_name = kwds['__name__']
        self.func_args = '({0})'.format(', '.join(arg_list))
        self.func_doc = SETTER_DOCS.format(name=self.columnName,
                                           param=arg_list[0],
                                           optdocs='\n'.join(doc_parts),
                                           optparams='\n'.join(param_parts),
                                           returns=returns)
        self.__dict__['__doc__'] = self.func_doc

    def __call__(self, record, value, **kwds):
        """
        Assigns the value for this column on the inputted database record.
        :param      record <View>
                    value  <variant>
        """
        kwds['useMethod'] = False
        return record.setRecordValue(self.columnName, value, **kwds)
#----------------------------------------------------------------------
class reverselookupmethod(object):
    """ Defines a reverse lookup method for looking up relations. """

    def __init__(self, **kwds):
        """
        Defines the getter method that will be used when accessing
        information about a column on a database record.  This class
        should only be used by the MetaView class when generating
        column methods on a model.

        :param kwds | keyword options (columnName, reference,
                      referenceDatabase, unique, cached, cacheTimeout
                      and the required __name__ entry)
        """
        self.__dict__.update(kwds)
        self.__lookup__ = True

        # maps view -> backend cache object (see cache())
        self._cache = {}

        # maps (record id, lookup hash, database name) -> cached result
        # set.  Bugfix: this attribute was read and written by __call__
        # but never initialized, so the first lookup raised
        # AttributeError.
        self._local_cache = {}

        self.reference = kwds.get('reference', '')
        self.referenceDb = kwds.get('referenceDatabase', None)
        self.columnName = kwds.get('columnName', '')
        self.unique = kwds.get('unique', False)
        self.cached = kwds.get('cached', False)
        self.cacheTimeout = kwds.get('cacheTimeout', 0)
        self.func_name = kwds['__name__']
        self.func_args = '()'
        self.func_doc = 'Auto-generated Orb reverse lookup method'

        self.__dict__['__doc__'] = self.func_doc

    def __call__(self, record, **options):
        """
        Calls the reverse lookup method for the database record.

        :param record <View>
               options | lookup keyword options

        :return <orb.View> || None when unique, otherwise <orb.RecordSet>
        """
        reload = options.pop('reload', False)

        # remove any invalid query lookups
        if 'where' in options and orb.Query.testNull(options['where']):
            options.pop('where')

        # lookup the records with a specific model
        options.setdefault('locale', record.recordLocale())
        view = options.get('view') or self.viewFor(record)
        if not view:
            return None if self.unique else orb.RecordSet()

        # return from the cache when specified
        cache = self.cache(view)
        cache_key = (record.id(),
                     hash(orb.LookupOptions(**options)),
                     record.database().name())

        if not reload and cache_key in self._local_cache:
            out = self._local_cache[cache_key]
            out.updateOptions(**options)
            return out

        self._local_cache.pop(cache_key, None)

        # make sure this is a valid record
        if not record.isRecord():
            if self.unique:
                return None
            return orb.RecordSet()

        # generate the reverse lookup query
        reverse_q = orb.Query(self.columnName) == record

        options['where'] = reverse_q & options.get('where')
        options['db'] = record.database()
        options['context'] = record.schema().context(self.func_name)

        lookup = orb.LookupOptions(**options)
        context = record.contextOptions(**options)

        if self.unique:
            output = view.selectFirst(lookup=lookup, options=context)
        else:
            output = view.select(lookup=lookup, options=context)

        if isinstance(output, orb.RecordSet):
            output.setSource(record)
            output.setSourceColumn(self.columnName)

        if cache and output is not None:
            self._local_cache[cache_key] = output
            cache.setValue(cache_key, True, timeout=self.cacheTimeout)

        return output

    def cache(self, view, force=False):
        """
        Returns the cache for this view, creating it on first use when
        caching is enabled (or forced).

        :param view | <subclass of orb.View>
               force | <bool>

        :return <orb.ViewCache> || None
        """
        try:
            return self._cache[view]
        except KeyError:
            if force or self.cached:
                cache = view.viewCache() or orb.ViewCache(view, view.schema().cache(), timeout=self.cacheTimeout)
                self._cache[view] = cache
                return cache
            return None

    def preload(self, record, data, options, type='records'):
        """
        Preload a list of records from the database.

        :param record | <orb.View>
               data | [<dict>, ..]
               options | <orb.ContextOptions> || None
               type | <str> | 'ids', 'count', 'first', 'last' or 'records'
        """
        view = self.viewFor(record)
        preload_cache = getattr(record, '_Model__preload_cache', {})
        rset = preload_cache.get(self.func_name)
        if rset is None:
            rset = orb.RecordSet()
            preload_cache[self.func_name] = rset
            setattr(record, '_Model__preload_cache', preload_cache)

        if type == 'ids':
            rset.cache('ids', data)
        elif type == 'count':
            rset.cache('count', data)
        elif type == 'first':
            rset.cache('first', view(__values=data, options=options) if data else None)
        elif type == 'last':
            rset.cache('last', view(__values=data, options=options) if data else None)
        else:
            # renamed loop variable so the `record` argument is not shadowed
            rset.cache('records', [view(__values=values, options=options) for values in data or []])

    def viewFor(self, record):
        """
        Returns the view for the inputted record.

        :return <orb.View>
        """
        return record.polymorphicModel(self.reference) or orb.system.model(self.reference, database=self.referenceDb)
# -----------------------------------------------------------------------------
class MetaView(type):
    """
    Defines the view Meta class that will be used to dynamically generate
    View class types.
    """
    def __new__(mcs, name, bases, attrs):
        """
        Manages the creation of database model classes, reading
        through the creation attributes and generating view
        schemas based on the inputted information.  This class
        never needs to be expressly defined, as any class that
        inherits from the View class will be passed through this
        as a constructor.

        :param mcs <MetaView>
        :param name <str>
        :param bases <tuple> (<object> base,)
        :param attrs <dict> properties

        :return <type>
        """
        # ignore initial class (marked with __db_ignore__) -- build it as a
        # plain type with no schema generation
        db_ignore = attrs.pop('__db_ignore__', False)
        if db_ignore:
            return super(MetaView, mcs).__new__(mcs, name, bases, attrs)

        # collect the __db_* configuration inherited from base views
        base_views = [base for base in bases if isinstance(base, MetaView)]
        base_data = {key: value for base_view in base_views
                     for key, value in base_view.__dict__.items() if key.startswith('__db_')}

        # define the default database information
        db_data = {
            '__db__': None,
            '__db_group__': 'Default',
            '__db_name__': name,
            '__db_viewname__': '',
            '__db_columns__': [],
            '__db_cached__': False,
            '__db_indexes__': [],
            '__db_pipes__': [],
            '__db_contexts__': {},
            '__db_views__': {},
            '__db_cache__': {},
            '__db_schema__': None,
            '__db_inherits__': None,
            '__db_abstract__': False,
            '__db_archived__': False,
            '__db_static__': False,
        }

        # override with any inherited data
        db_data.update(base_data)

        # override with any custom data
        db_data.update({key: value for key, value in attrs.items() if key.startswith('__db_')})

        # default the inheritance chain to the first base view's schema
        if not db_data['__db_inherits__'] and base_views and base_views[0].schema():
            db_data['__db_inherits__'] = base_views[0].schema().name()

        # create a new model for this view
        return mcs.createModel(mcs, name, bases, attrs, db_data)

    # NOTE(review): declared @staticmethod but invoked with the metaclass
    # passed explicitly (mcs.createModel(mcs, ...)); `mcs` is therefore a
    # plain first argument here, not an implicit one.
    @staticmethod
    def createModel(mcs, name, bases, attrs, db_data):
        """
        Create a new view model.
        """
        new_model = super(MetaView, mcs).__new__(mcs, name, bases, attrs)

        schema = db_data['__db_schema__']
        if schema:
            # an explicit schema was supplied -- merge the class-level
            # declarations into it
            db_data['__db_name__'] = schema.name()
            db_data['__db_viewname__'] = schema.viewName()

            if db_data['__db_columns__']:
                columns = schema.columns(recurse=False) + db_data['__db_columns__']
                schema.setColumns(columns)
            if db_data['__db_indexes__']:
                indexes = schema.indexes() + db_data['__db_indexes__']
                schema.setIndexes(indexes)
            if db_data['__db_pipes__']:
                pipes = schema.pipes() + db_data['__db_pipes__']
                schema.setPipes(pipes)
            if db_data['__db_contexts__']:
                contexts = dict(schema.contexts())
                contexts.update(db_data['__db_contexts__'])
                schema.setContexts(contexts)
            if db_data['__db_views__']:
                # NOTE(review): this loop rebinds the `name` parameter
                for name, view in db_data['__db_views__'].items():
                    schema.setView(name, view)
        else:
            # create the view schema from scratch
            schema = orb.ViewSchema()
            schema.setDatabase(db_data['__db__'])
            schema.setAutoPrimary(False)
            schema.setName(db_data['__db_name__'] or name)
            schema.setGroupName(db_data['__db_group__'])
            schema.setDbName(db_data['__db_viewname__'])
            schema.setAbstract(db_data['__db_abstract__'])
            schema.setColumns(db_data['__db_columns__'])
            schema.setIndexes(db_data['__db_indexes__'])
            schema.setPipes(db_data['__db_pipes__'])
            schema.setInherits(db_data['__db_inherits__'])
            schema.setArchived(db_data['__db_archived__'])
            schema.setContexts(db_data['__db_contexts__'])
            schema.setStatic(db_data['__db_static__'])

            # 'enabled' falls back to 'preload' when not given explicitly
            schema.setCacheEnabled(db_data['__db_cache__'].get('enabled', db_data['__db_cache__'].get('preload')))
            schema.setPreloadCache(db_data['__db_cache__'].get('preload'))
            schema.setCacheTimeout(db_data['__db_cache__'].get('timeout', 0))

            for name, view in db_data['__db_views__'].items():
                schema.setView(name, view)

        schema.setModel(new_model)
        orb.system.registerSchema(schema)

        db_data['__db_schema__'] = schema

        # add the db values to the class
        for key, value in db_data.items():
            setattr(new_model, key, value)

        # create class methods for the index instances
        for index in schema.indexes():
            iname = index.name()
            if not hasattr(new_model, iname):
                setattr(new_model, index.name(), classmethod(index))

        # create instance methods for the pipe instances
        # (instancemethod is the Python 2 bound-method constructor,
        # provided elsewhere in this module)
        for pipe in schema.pipes():
            pname = pipe.name()
            if not hasattr(new_model, pname):
                pipemethod = instancemethod(pipe, None, new_model)
                setattr(new_model, pname, pipemethod)

        # pylint: disable-msg=W0212
        columns = schema.columns(recurse=False)
        for column in columns:
            colname = column.name()

            # create getter method
            gname = column.getterName()
            if gname and not hasattr(new_model, gname):
                gmethod = gettermethod(columnName=colname,
                                       translatable=column.isTranslatable(),
                                       inflatable=column.isReference(),
                                       returns=column.returnType(),
                                       __name__=gname)
                getter = instancemethod(gmethod, None, new_model)
                setattr(new_model, gname, getter)

            # create setter method (skipped for read-only columns)
            sname = column.setterName()
            if sname and not (column.isReadOnly() or hasattr(new_model, sname)):
                smethod = settermethod(columnName=colname,
                                       translatable=column.isTranslatable(),
                                       inflatable=column.isReference(),
                                       returns=column.returnType(),
                                       __name__=sname)
                setter = instancemethod(smethod, None, new_model)
                setattr(new_model, sname, setter)

            # create an index if necessary
            iname = column.indexName()
            if column.indexed() and iname and not hasattr(new_model, iname):
                index = orb.Index(iname,
                                  [column.name()],
                                  unique=column.unique())
                index.setCached(column.indexCached())
                index.setCacheTimeout(column.indexCacheTimeout())
                index.__name__ = iname
                imethod = classmethod(index)
                setattr(new_model, iname, imethod)

            # create a reverse lookup on the referenced model
            if column.isReversed() and column.schema().name() == db_data['__db_name__']:
                rev_name = column.reversedName()
                rev_cached = column.reversedCached()
                ref_name = column.reference()
                ref_model = column.referenceModel()
                rev_cacheTimeout = column.reversedCacheTimeout()

                # create the lookup method
                lookup = reverselookupmethod(columnName=column.name(),
                                             reference=db_data['__db_name__'],
                                             unique=column.unique(),
                                             cached=rev_cached,
                                             cacheTimeout=rev_cacheTimeout,
                                             __name__=rev_name)

                # ensure we're assigning it to the proper base module
                # NOTE(review): this walk can terminate with ref_model bound
                # to orb.View itself when the chain bottoms out -- confirm
                # that is the intended target for the lookup
                while ref_model and ref_model.__module__ != 'orb.schema.dynamic' and \
                        ref_model.__bases__ and ref_model.__bases__[0] == orb.View:
                    ref_model = ref_model.__bases__[0]

                # assign to an existing model, or stash it until the
                # referenced model is created
                if ref_model:
                    ilookup = instancemethod(lookup, None, ref_model)
                    setattr(ref_model, rev_name, ilookup)
                else:
                    TEMP_REVERSE_LOOKUPS.setdefault(ref_name, [])
                    TEMP_REVERSE_LOOKUPS[ref_name].append((rev_name, lookup))

        # assign any cached reverse lookups to this model
        lookups = TEMP_REVERSE_LOOKUPS.pop(db_data['__db_name__'], [])
        for rev_name, lookup in lookups:
            ilookup = instancemethod(lookup, None, new_model)
            setattr(new_model, rev_name, ilookup)

        return new_model
""" Defines an piping system to use when accessing multi-to-multi records. """
import projex.text
from orb import errors
from projex.lazymodule import lazy_import
from projex.text import nativestring as nstr
from xml.etree import ElementTree
orb = lazy_import('orb')
class Pipe(object):
    """
    Defines a piped way to lookup information from a database.
    Creating a Pipe generates an object that works like a method, however
    has a preset query built into it allowing multi-to-multi connections
    """
    def __init__(self, name, **options):
        # public method-style name for this pipe
        self.__name__ = name
        self._schema = options.get('schema', None)

        # join ("through") table configuration
        self._pipeReference = options.get('pipeReference', options.get('through', ''))
        self._pipeTable = None          # resolved lazily in pipeReferenceModel()
        self._sourceColumn = options.get('sourceColumn', options.get('source', ''))

        # target table configuration
        self._targetReference = options.get('targetReference', '')
        self._targetTable = None        # resolved lazily in targetReferenceModel()
        self._targetColumn = options.get('targetColumn', options.get('target', ''))

        # caching / behavior flags
        self._cache = {}                # table -> cache object (see cache())
        self._cached = options.get('cached', False)
        self._cacheTimeout = options.get('cacheTimeout', 0)
        self._referenced = options.get('referenced', False)
        self._unique = options.get('unique', False)

    def __call__(self, record, **options):
        """
        Looks up the records joined to `record` through the pipe table.

        :param record | <orb.Table>

        :return <orb.PipeRecordSet>, or a single record / None when unique
        """
        # return a blank piperecordset
        if not record.isRecord():
            return orb.PipeRecordSet([], record)

        # NOTE: `reload` shadows the Python 2 builtin of the same name
        reload = options.get('reload', False)
        pipeTable = self.pipeReferenceModel()
        targetTable = self.targetReferenceModel()
        if None in (pipeTable, targetTable):
            return orb.PipeRecordSet([], record)

        # update the caching information
        pipe_cache = self.cache(pipeTable)
        target_cache = self.cache(targetTable)
        cache_key = (record.id() if record else 0,
                     hash(orb.LookupOptions(**options)),
                     options.get('db', record.database()).name())

        # ensure neither the pipe nor target table have timed out their caches
        preload_cache = getattr(record, '_Model__preload_cache', {})
        if not reload and self.__name__ in preload_cache:
            out = preload_cache[self.__name__]
            out.updateOptions(**options)
            return out

        preload_cache.pop(self.__name__, None)

        # create the query for the pipe: select the target-column values of
        # all join rows pointing back at `record`, then match target records
        # whose key is in that sub-selection
        sub_q = orb.Query(pipeTable, self._sourceColumn) == record
        rset = pipeTable.select(columns=[self._targetColumn], where=sub_q)
        q = orb.Query(targetTable).in_(rset)

        if 'where' in options:
            options['where'] = q & options['where']
        else:
            options['where'] = q

        # generate the new record set
        options['context'] = record.schema().context(self.name())
        lookup = orb.LookupOptions(**options)
        context = record.contextOptions(**options)
        records = targetTable.select(lookup=lookup, options=context)

        # map this recordset to a pipe set (if the options generate a pipe set)
        if isinstance(records, orb.RecordSet):
            # the schema context may supply a custom RecordSet class
            rset_cls = record.schema().context(self.__name__).get('RecordSet', orb.PipeRecordSet)
            rset = rset_cls(records,
                            record,
                            pipeTable,
                            self._sourceColumn,
                            self._targetColumn)
            rset.setLookupOptions(lookup)
            rset.setContextOptions(context)

            preload_cache[self.__name__] = rset
            setattr(record, '_Model__preload_cache', preload_cache)

            if pipe_cache:
                pipe_cache.setValue(cache_key, True, timeout=self._cacheTimeout)
            if target_cache:
                target_cache.setValue(cache_key, True, timeout=self._cacheTimeout)
        else:
            rset = records

        # a unique pipe returns a single record (or None) instead of a set
        if self._unique:
            if isinstance(rset, orb.RecordSet):
                return rset.first()
            else:
                try:
                    return rset[0]
                except IndexError:
                    return None

        return rset

    def cache(self, table, force=False):
        """
        Returns the cache for the inputted table, creating it on first
        use when caching is enabled (or forced).

        :param table | <subclass of orb.Table>

        :return <orb.TableCache> || None
        """
        try:
            return self._cache[table]
        except KeyError:
            if force or self.cached():
                cache = table.tableCache() or orb.TableCache(table, orb.system.cache(), timeout=self._cacheTimeout)
                self._cache[table] = cache
                return cache
            else:
                return None

    def cached(self):
        """
        Returns whether or not the results for this pipe should be cached.

        :return <bool>
        """
        return self._cached

    def cacheTimeout(self):
        """
        Returns the time in seconds to store the cached results for this pipe.

        :return <int>
        """
        return self._cacheTimeout

    def isReferenced(self):
        """
        Returns whether or not this pipe was loaded from a referenced
        (external) definition.

        :return <bool>
        """
        return self._referenced

    def name(self):
        """
        Returns the name of this pipe.

        :return <str>
        """
        return self.__name__

    def preload(self, record, data, options, type='records'):
        """
        Preloads the inputted record and result values.

        :param record | <orb.Table>
               data | [<dict>, ..]
               options | <orb.DatabaseOptions>
               type | <str> | 'ids', 'count', 'first', 'last' or 'records'
        """
        target_model = self.targetReferenceModel()
        pipe_model = self.pipeReferenceModel()

        # define the pipe cached value
        preload_cache = getattr(record, '_Model__preload_cache', {})
        pset = preload_cache.get(self.__name__)
        if pset is None:
            pset = orb.PipeRecordSet(orb.RecordSet(),
                                     record,
                                     pipe_model,
                                     self._sourceColumn,
                                     self._targetColumn)
            preload_cache[self.__name__] = pset
            setattr(record, '_Model__preload_cache', preload_cache)

        # update the cache for the dataset
        if type == 'ids':
            pset.cache('ids', data)
        elif type == 'count':
            pset.cache('count', data)
        elif type == 'first':
            pset.cache('first', target_model(__values=data, options=options) if data else None)
        elif type == 'last':
            pset.cache('last', target_model(__values=data, options=options) if data else None)
        elif type == 'records':
            # NOTE: the comprehension variable shadows the `record` argument
            pset.cache('records', [target_model(__values=record, options=options) for record in data or []])

    def pipeReference(self):
        # name of the join ("through") table
        return self._pipeReference

    def pipeReferenceModel(self):
        # lazily resolve (and memoize) the join table model; also back-fills
        # the target reference from the join table's column metadata
        if self._pipeTable is None:
            pipeTable = orb.system.model(self._pipeReference)
            if pipeTable and not self._targetReference:
                col = pipeTable.schema().column(self._targetColumn)
                self._targetReference = col.reference()
            self._pipeTable = pipeTable
        if not self._pipeTable:
            raise errors.TableNotFound(self._pipeReference)
        return self._pipeTable

    def setCached(self, state):
        """
        Sets whether or not this pipe should cache the results of its query.

        :param state | <bool>
        """
        self._cached = state

    def setCacheTimeout(self, seconds):
        """
        Sets the time in seconds to store the cached results for this pipe
        set.

        :param seconds | <int>
        """
        self._cacheTimeout = seconds

    def setUnique(self, unique):
        # when unique, __call__ returns a single record instead of a set
        self._unique = unique

    def setName(self, name):
        """
        Sets the name for this pipe.

        :param name | <str>
        """
        self.__name__ = nstr(name)

    def setPipeReference(self, reference):
        self._pipeReference = reference

    def setSourceColumn(self, column):
        self._sourceColumn = column

    def setTargetColumn(self, column):
        self._targetColumn = column

    def setTargetReference(self, reference):
        self._targetReference = reference

    def schema(self):
        return self._schema

    def setSchema(self, schema):
        self._schema = schema

    def sourceColumn(self):
        return self._sourceColumn

    def sourceReferenceModel(self):
        # model referenced by the join table's source column
        model = self.pipeReferenceModel()
        column = model.schema().column(self.sourceColumn())
        return column.referenceModel()

    def targetColumn(self):
        return self._targetColumn

    def targetReference(self):
        # lazily derive the target table name from the join table's schema;
        # a missing schema/column simply leaves the reference blank
        if not self._targetReference:
            try:
                pipe = orb.system.schema(self.pipeReference())
                col = pipe.column(self.targetColumn())
                self._targetReference = col.reference()
            except AttributeError:
                pass
        return self._targetReference

    def targetReferenceModel(self):
        # lazily resolve (and memoize) the target table model
        if self._targetTable is None:
            self._targetTable = orb.system.model(self.targetReference())
        if not self._targetTable:
            raise errors.TableNotFound(self.targetReference())
        return self._targetTable

    # NOTE(review): the `context` parameter is currently unused
    def toolTip(self, context='pipe'):
        """
        Returns a rich-text tooltip describing this pipe.

        :return <str>
        """
        tip = '''\
<b>{schema}.{name} <small>(RecordSet([{target_model}, ..]))</small></b>
<pre>
>>> # piped through {pipe} model
>>> {record} = {schema}()
>>> {record}.{getter}()
&lt;orb.PipeRecordSet([&lt;{target_model}&gt;, ..])&gt;
>>> # modify the joined records
>>> {target_record} = {target_model}.all().first()
>>> {record}.{getter}().addRecord({target_record})
>>> {record}.{getter}().removeRecord({target_record})
</pre>
'''
        return tip.format(name=self.name(),
                          getter=self.name(),
                          schema=self.schema().name(),
                          record=projex.text.underscore(self.schema().name()),
                          pipe=self.pipeReference(),
                          source=self.sourceColumn(),
                          target=self.targetColumn(),
                          target_model=self.targetReference(),
                          target_record=projex.text.underscore(self.targetReference()))

    def toXml(self, xparent):
        """
        Saves the pipe data for this column to XML.

        :param xparent | <xml.etree.ElementTree.Element>

        :return <xml.etree.ElementTree.Element>
        """
        xpipe = ElementTree.SubElement(xparent, 'pipe')
        xpipe.set('name', self.name())
        if self._unique:
            xpipe.set('unique', 'True')
        if self.cached():
            xpipe.set('cached', nstr(self.cached()))
            xpipe.set('timeout', nstr(self._cacheTimeout))
        ElementTree.SubElement(xpipe, 'through').text = self._pipeReference
        ElementTree.SubElement(xpipe, 'source').text = self._sourceColumn
        ElementTree.SubElement(xpipe, 'target').text = self._targetColumn
        if self._targetReference:
            ElementTree.SubElement(xpipe, 'table').text = self._targetReference
        return xpipe

    def unique(self):
        return self._unique

    @staticmethod
    def fromXml(xpipe, referenced=False):
        """
        Generates a pipe method descriptor from xml data.

        :param xpipe | <xml.etree.Element>
               referenced | <bool>

        :return <Pipe> || None
        """
        pipe = Pipe(xpipe.get('name', ''), referenced=referenced)
        pipe.setUnique(xpipe.get('unique') == 'True')
        pipe.setCached(xpipe.get('cached') == 'True')
        pipe.setCacheTimeout(int(xpipe.get('timeout', xpipe.get('expires', pipe._cacheTimeout))))

        # each value may be stored as a child element (newer format) or as
        # an attribute (older format); find() returning None raises
        # AttributeError on `.text`, triggering the fallback
        try:
            pipe.setPipeReference(xpipe.find('through').text)
        except AttributeError:
            pipe.setPipeReference(xpipe.get('pipeReference', ''))
        try:
            pipe.setSourceColumn(xpipe.find('source').text)
        except AttributeError:
            pipe.setSourceColumn(xpipe.get('sourceColumn', ''))
        try:
            pipe.setTargetReference(xpipe.find('table').text)
        except AttributeError:
            pipe.setTargetReference(xpipe.get('targetReference', ''))
        try:
            pipe.setTargetColumn(xpipe.find('target').text)
        except AttributeError:
            pipe.setTargetColumn(xpipe.get('targetColumn', ''))

        return pipe

Sorry, the diff of this file is too big to display

""" Defines a grouping system for schemas. """
import logging
import os.path
import xml.parsers.expat
import sys
from projex.lazymodule import lazy_import
from projex.text import nativestring as nstr
from xml.etree import ElementTree
import projex
log = logging.getLogger(__name__)
orb = lazy_import('orb')
class TableGroup(object):
    """
    Defines a named grouping of table schemas, optionally loaded by
    reference from an external module's .orb file.
    """
    def __init__(self, name='', referenced=False, manager=None):
        self._name = name
        self._manager = manager or orb.system

        # reference information
        self._requires = []
        self._module = ''
        self._filename = ''
        self._order = 0
        self._referenced = referenced
        self._databaseName = ''
        self._namespace = ''
        self._properties = {}
        self._useModelPrefix = False
        self._modelPrefix = ''

    def addSchema(self, schema):
        """
        Adds the inputted schema to the group.

        :param schema | <orb.TableSchema>
        """
        # membership is tracked on the schema itself; schemas() filters the
        # manager's schemas by group
        schema.setGroup(self)

    def database(self):
        """
        Returns the database linked with this group.

        :return <orb.Database> || None
        """
        return orb.system.database(self.databaseName())

    def databaseName(self):
        """
        Returns the default database name for this schema group.

        :return <str>
        """
        return self._databaseName

    def filename(self):
        """
        Returns the filename string for this instance.

        :return <str>
        """
        return self._filename

    def isReference(self):
        """
        Returns whether or not this group is referring to an external module.

        :return <bool>
        """
        return self.isReferenced()

    def isReferenced(self):
        """
        Returns whether or not this group is referenced from a separate file.

        :return <bool>
        """
        return self._referenced

    def merge(self):
        """
        Merges the group from its reference information, importing the
        referenced module and loading schemas from its .orb file.

        :return <int> | number of schemas loaded
        """
        modname = self.module()
        if not modname:
            return 0

        requires = [modname.split('.')[0]] + self.requires()
        projex.requires(*requires)

        try:
            __import__(modname)
        except ImportError:
            log.exception('Could not import: %s.' % modname)
            return 0

        # the .orb file lives next to the module, named after the group
        module = sys.modules[modname]
        basepath = os.path.dirname(module.__file__)
        filename = os.path.join(basepath, self.name().lower() + '.orb')

        try:
            xorb = ElementTree.parse(nstr(filename)).getroot()
        except xml.parsers.expat.ExpatError:
            # NOTE(review): returns False here while every other exit
            # returns an int count (False == 0, so callers still work)
            log.exception('Failed to load ORB file: %s' % filename)
            return False

        # load schemas
        count = 0
        xgroups = xorb.find('groups')
        for xgroup in xgroups:
            # only load the group matching this instance's name
            if xgroup.get('name') != self.name():
                continue
            xschemas = xgroup.find('schemas')
            if xschemas is None:
                return 0
            for xschema in xschemas:
                schema = orb.TableSchema.fromXml(xschema)
                schema.setGroupName(self.name())
                schema.setDatabaseName(self.databaseName())
                self.addSchema(schema)
                count += 1
        return count

    def modelPrefix(self):
        """
        Returns the string that will be used as the prefix for generating
        all models for a particular group.  This is useful when keeping
        typed objects together under a common namespace.

        :return <str>
        """
        return self._modelPrefix

    def module(self):
        """
        Returns the module that this group is coming from.

        :return <str>
        """
        return self._module

    def name(self):
        """
        Returns the name for this group.

        :return <str>
        """
        return self._name

    def namespace(self):
        """
        Returns the namespace for this group.  If no namespace is explicitly
        defined, then the database namespace (or the global namespace) is
        returned.

        :return <str>
        """
        if self._namespace:
            return self._namespace
        db = self.database()
        if db:
            return db.namespace()
        return orb.system.namespace()

    def order(self):
        """
        Returns the order that this group should be loaded in.

        :return <int>
        """
        return self._order

    def property(self, key, default=''):
        """
        Returns the property value for the inputted key string.

        :param key | <str>
               default | <str>

        :return <str>
        """
        # keys and values are normalized to native strings on set/get
        return self._properties.get(nstr(key), nstr(default))

    # noinspection PyMethodMayBeStatic
    def removeSchema(self, schema):
        """
        Removes the inputted schema from this group.

        :param schema | <orb.TableSchema>
        """
        schema.setGroup(None)

    def requires(self):
        """
        Returns the requirements to pass to the projex environment system \
        for this group.

        :return [<str>, ..]
        """
        return self._requires

    def schemas(self):
        """
        Returns a list of schemas that are linked with this group.

        :return [<orb.TableSchema>, ..]
        """
        return [schema for schema in self._manager.schemas() if schema.group() == self]

    def setDatabaseName(self, name):
        """
        Sets the default database name for this schema to the inputted name.

        :param name | <str>
        """
        self._databaseName = name

    def setModelPrefix(self, prefix):
        """
        Sets the string that will be used as the prefix for generating
        all models for a particular group.  This is useful when keeping
        typed objects together under a common namespace.

        :param prefix | <str>
        """
        self._modelPrefix = prefix

    def setModule(self, module):
        """
        Sets the module name for this group to the inputted module name.

        :param module | <str>
        """
        self._module = module

    def setName(self, name):
        """
        Sets the name for this group to the inputted name.

        :param name | <str>
        """
        self._name = name

    def setFilename(self, filename):
        """
        Sets the filename for this instance to the inputted filename.

        :param filename | <str>
        """
        self._filename = filename

    def setNamespace(self, namespace):
        """
        Sets the namespace that will be used for this group.

        :param namespace | <str>
        """
        self._namespace = namespace

    def setOrder(self, order):
        """
        Sets the order that this group should be loaded in.

        :param order | <int>
        """
        self._order = order

    def setProperty(self, key, value):
        """
        Sets the property value for the given key, value pairing.

        :param key | <str>
               value | <str>
        """
        self._properties[nstr(key)] = nstr(value)

    def setRequires(self, requires):
        """
        Sets the requirements for this group to the inputted modules.

        :param requires | [<str>, ..]
        """
        # copy to avoid aliasing the caller's list
        self._requires = requires[:]

    def setUseModelPrefix(self, state=True):
        # toggles whether modelPrefix() is applied when generating models
        self._useModelPrefix = state

    def toXml(self, xparent):
        """
        Saves the schema group to the inputted xml.

        :param xparent | <xml.etree.ElementTree.Element>

        :return <xml.etree.ElementTree.Element>
        """
        xgroup = ElementTree.SubElement(xparent, 'group')
        xgroup.set('name', self.name())
        if self.isReferenced():
            xgroup.set('referenced', 'True')
        else:
            if self.databaseName():
                xgroup.set('db', self.databaseName())
            if self._namespace:
                xgroup.set('namespace', self._namespace)
            if self.useModelPrefix():
                xgroup.set('usePrefix', str(self.useModelPrefix()))
                xgroup.set('prefix', self.modelPrefix())

        # save the properties
        xprops = ElementTree.SubElement(xgroup, 'properties')
        for key, value in self._properties.items():
            xprop = ElementTree.SubElement(xprops, 'property')
            xprop.set('key', key)
            xprop.set('value', value)

        # save reference information; inline schemas are only written for
        # non-module groups (module groups reload from their .orb file)
        if self.module():
            xgroup.set('module', self.module())
            xgroup.set('requires', ','.join(self.requires()))
        if not self.module():
            xschemas = ElementTree.SubElement(xgroup, 'schemas')
            for schema in sorted(self.schemas(), key=lambda x: x.name()):
                if not schema.isReferenced():
                    schema.toXml(xschemas)
        return xgroup

    def useModelPrefix(self):
        return self._useModelPrefix

    @staticmethod
    def fromXml(xgroup, referenced=False, database=None, manager=None):
        """
        Loads the schema group from the inputted xml schema data.

        :param xgroup | <xml.etree.ElementTree.Element>
               referenced | <bool>
               database | <str> || None
               manager | <orb.Manager> || None

        :return (<orb.TableGroup>, [<orb.TableSchema>, ..]) || (None, [])
        """
        # load schemas
        grpname = xgroup.get('name')
        dbname = xgroup.get('db', xgroup.get('dbname'))
        modname = xgroup.get('module')

        # force database to import
        if database is not None:
            dbname = database

        # import a reference file
        if modname:
            requires = xgroup.get('requires', '').split(',')
            while '' in requires:
                requires.remove('')
            projex.requires(*requires)
            try:
                __import__(modname)
            except ImportError:
                log.exception('Error importing group plugin: %s' % modname)
                return None, []

            grp = orb.system.group(grpname, database=dbname)
            if not grp:
                return None, []

            grp.setDatabaseName(dbname)
            grp.setModule(modname)
            grp.setRequires(requires)

            # load properties
            xprops = xgroup.find('properties')
            if xprops is not None:
                for xprop in xprops:
                    grp.setProperty(xprop.get('key'), xprop.get('value'))

            # NOTE(review): this branch always returns (None, []) even on
            # success -- it looks deliberate (module-referenced groups
            # contribute their schemas through merge() instead), but verify
            return None, []

        # import non-referenced schemas
        else:
            grp = orb.system.group(grpname, database=dbname)
            if not grp:
                grp = TableGroup(referenced=referenced, manager=manager)
                grp.setName(grpname)
                grp.setModelPrefix(xgroup.get('prefix', ''))
                grp.setUseModelPrefix(xgroup.get('useModelPrefix') == 'True')
                grp.setNamespace(xgroup.get('namespace', ''))
            if dbname is not None:
                grp.setDatabaseName(dbname)

            # load schemas
            schemas = []
            xschemas = xgroup.find('schemas')
            if xschemas is not None:
                for xschema in xschemas:
                    schema = orb.TableSchema.fromXml(xschema, referenced)
                    schema.setGroup(grp)
                    grp.addSchema(schema)
                    schemas.append(schema)
                    if dbname is not None:
                        schema.setDatabaseName(dbname)

            # load properties
            xprops = xgroup.find('properties')
            if xprops is not None:
                for xprop in xprops:
                    grp.setProperty(xprop.get('key'), xprop.get('value'))

            return grp, schemas
""" Defines the meta information for a Table class. """
import logging
import projex.text
from projex.lazymodule import lazy_import
from projex.text import nativestring as nstr
from xml.etree import ElementTree
from .meta.metatable import MetaTable
from . import dynamic
log = logging.getLogger(__name__)
orb = lazy_import('orb')
errors = lazy_import('orb.errors')
class TableSchema(object):
"""
Contains meta data information about a table as it maps to a database.
"""
TEMPLATE_PREFIXED = '[prefix::underscore::lower]_[name::underscore::lower]'
TEMPLATE_TABLE = '[name::underscore::plural::lower]'
_customHandlers = []
def __cmp__(self, other):
    """
    Python 2 rich comparison: orders schemas by inheritance depth,
    then by group order, then by name.  Returns -1, 0 or 1.
    """
    # check to see if this is the same instance
    if id(self) == id(other):
        return 0

    # make sure this instance is a valid one for the other kind
    if not isinstance(other, TableSchema):
        return -1

    # compare inheritance level (deeper ancestry sorts later)
    my_ancestry = self.ancestry()
    other_ancestry = other.ancestry()
    result = cmp(len(my_ancestry), len(other_ancestry))
    if not result:
        # compare groups; a schema with a group sorts before one without
        my_group = self.group()
        other_group = other.group()
        if my_group is not None and other_group is not None:
            result = cmp(my_group.order(), other_group.order())
        elif my_group:
            result = -1
        else:
            result = 1

        # fall back to alphabetical ordering by schema name
        if not result:
            return cmp(self.name(), other.name())
        return result
    return result
def __init__(self, referenced=False):
    """
    Initializes a blank table schema.

    :param referenced | <bool> | whether this schema was loaded from a
                        referenced (external) definition
    """
    # identity / naming
    self._name = ''
    self._displayName = ''
    self._databaseName = ''
    self._groupName = ''
    self._dbname = ''
    self._namespace = ''
    self._stringFormat = ''
    self._inherits = ''

    # state flags
    self._abstract = False
    self._autoPrimary = True
    self._autoLocalize = False
    self._inheritedLoaded = False
    self._referenced = referenced
    self._archived = False

    # caching configuration
    self._cache = None
    self._cacheTimeout = 0
    self._cacheEnabled = False
    self._preloadCache = False

    # schema contents
    self._columns = set()
    self._columnIndex = {}
    self._views = {}
    self._properties = {}
    self._indexes = []
    self._pipes = []
    self._additionalSearchColumns = []
    self._contexts = {}
    self._validators = []

    # linked objects (resolved lazily)
    self._database = None
    self._timezone = None
    self._defaultOrder = None
    self._group = None
    self._primaryColumns = None
    self._model = None
    self._archiveModel = None
    self._searchEngine = None
def addColumn(self, column):
    """
    Adds the inputted column to this table schema.

    :param column | <orb.Column>
    """
    # adopt orphaned columns so the column knows its owning schema
    if not column._schema:
        column._schema = self
    self._columns.add(column)
def additionalSearchColumns(self):
    """
    Returns a list of additional columns to use during search.

    :return [<str>, ..]
    """
    return self._additionalSearchColumns
def addIndex(self, index):
    """
    Adds the inputted index to this table schema, ignoring duplicates.

    :param index | <orb.Index>
    """
    if index not in self._indexes:
        index.setSchema(self)
        self._indexes.append(index)
def addPipe(self, pipe):
"""
Adds the inputted pipe reference to this table schema.
:param pipe | <orb.Pipe>
"""
if pipe in self._pipes:
return
pipe.setSchema(self)
self._pipes.append(pipe)
def addValidator(self, validator):
"""
Adds a record validators that are associated with this schema. You
can define different validation addons for a table's schema that will process when
calling the Table.validateRecord method.
:param validator | <orb.AbstractRecordValidator>
"""
self._validators.append(validator)
def ancestor(self):
"""
Returns the direct ancestor for this schema that it inherits from.
:return <TableSchema> || None
"""
if self.inherits():
return orb.system.schema(self.inherits())
return None
def ancestry(self):
"""
Returns the different inherited schemas for this instance.
:return [<TableSchema>, ..]
"""
if not self._inherits:
return []
schema = orb.system.schema(self.inherits())
if not schema:
return []
return schema.ancestry() + [schema]
def ancestryQuery(self):
    """
    Returns the query that will link this schema to its ancestors.

    :return <Query> || <QueryCompound> || None
    """
    if not self.inherits():
        return None
    from orb import Query as Q
    query = Q()
    ancestry = self.ancestry()
    schema = self
    # template used to render the per-table inheritance field name
    ifield_templ = orb.system.settings().inheritField()
    # walk from the direct parent back to the root, joining each child
    # table to its parent on the inheritance field
    for ancest in reversed(ancestry):
        afield = ifield_templ.format(table=ancest.databaseName())
        sfield = ifield_templ.format(table=schema.databaseName())
        if ancest.inherits():
            # intermediate ancestor: join on both inherit fields
            query &= Q(ancest.model(), afield) == Q(schema.model(), sfield)
        else:
            # root ancestor: join its primary key to the child's inherit field
            query &= Q(ancest.model()) == Q(schema.model(), sfield)
        schema = ancest
    return query
def ancestryModels(self):
"""
Returns the ancestry models for this schema.
:return [<subclass of orb.Table>, ..]
"""
return [x.model() for x in self.ancestry()]
def archiveModel(self):
return self._archiveModel
def autoLocalize(self):
"""
Specifies whether or not this table should automatically localize the search results.
When this is on, and the table has a `locale` column, it will automatically be filtered
out for the lookup results. By default, this option will be off.
:return <bool>
"""
return self._autoLocalize
def autoPrimary(self):
"""
Returns whether or not this schema auto-generates the primary key. \
This is useful when defining reusable schemas that could be applied \
to various backends, for instance an auto-increment column for a \
PostgreSQL database vs. a string column for a MongoDB. By default, it \
is recommended this value remain True.
:return <bool>
"""
return self._autoPrimary
def baseAncestor(self):
"""
Returns the base ancestor for this schema.
:return <orb.TableSchema>
"""
if not self.inherits():
return self
ancestry = self.ancestry()
if ancestry:
return ancestry[0]
return self
def cache(self):
"""
Returns the table cache associated with this schema.
:return <orb.caching.TableCache>
"""
return self._cache or self.database().cache()
def cacheTimeout(self):
"""
Returns the number of seconds that the caching system will use before
clearing its cache data for this table.
:return <int>
"""
return self._cacheTimeout
def column(self, name, recurse=True, flags=0, kind=0):
    """
    Returns the column instance based on its name.  Dotted names
    (e.g. 'user.address.city') traverse foreign-key references.
    If error reporting is on, then the ColumnNotFound
    error will be thrown the key inputted is not a valid
    column name.

    :param name | <str>
           recurse | <bool>
           flags | <int>
           kind | <int>

    :return <orb.Column> || None
    """
    # lookups are memoized per unique (name, recurse, flags, kind) signature
    key = (name, recurse, flags, kind)
    key = hash(key)
    # lookup the existing cached record
    try:
        return self._columnIndex[key]
    except KeyError:
        pass
    # lookup the new record
    parts = nstr(name).split('.')
    part = parts[0]
    next_parts = parts[1:]
    # match the first path segment against this schema's columns
    found = None
    for column in self.columns(recurse=recurse, flags=flags, kind=kind):
        if column.isMatch(part):
            found = column
            break
    # resolve the remaining path segments through the referenced schema
    if found is not None and next_parts:
        refmodel = found.referenceModel()
        if refmodel:
            refschema = refmodel.schema()
            next_part = '.'.join(next_parts)
            found = refschema.column(next_part, recurse=recurse, flags=flags, kind=kind)
        else:
            # dotted path on a non-reference column cannot resolve
            found = None
    # cache this lookup for the future (misses are cached as None too)
    self._columnIndex[key] = found
    return found
def columnNames(self, recurse=True, flags=0, kind=0):
    """
    Returns the sorted list of column names defined for this schema.

    :param recurse | <bool>
           flags | <orb.Column.Flags>
           kind | <orb.Column.Kind>

    :return [<str>, ..]
    """
    matched = self.columns(recurse=recurse, flags=flags, kind=kind)
    return sorted(column.name() for column in matched)
def columns(self, recurse=True, flags=0, kind=0):
    """
    Returns the list of column instances that are defined
    for this table schema instance.

    :param recurse | <bool>
           flags | <orb.Column.Flags>
           kind | <orb.Column.Kind>

    :return <list> [ <orb.Column>, .. ]
    """
    # ensure the primary columns exist before gathering
    self.primaryColumns()
    output = {column for column in self._columns if
              (not flags or column.testFlag(flags)) and
              (not kind or column.isKind(kind))}
    # include proxy columns defined on the model class when requested
    if kind & orb.Column.Kind.Proxy and self._model:
        output.update(self._model.proxyColumns())
    if recurse and self.inherits():
        schema = self.inheritsSchema()
        if not schema:
            raise errors.TableNotFound(self.inherits())
        ancest_columns = schema.columns(recurse=recurse, flags=flags, kind=kind)
        # a column defined both here and on an ancestor is a schema error
        dups = output.intersection(ancest_columns)
        if dups:
            dup_names = ','.join([x.name() for x in dups])
            raise errors.DuplicateColumnFound(self.name(), dup_names)
        output.update(ancest_columns)
    return list(output)
def context(self, name):
"""
Returns the context for this schema. This will define different override options when a particular
call is made to the table within a given context.
:param name | <str>
:return <dict>
"""
return self._contexts.get(name, {})
def contexts(self):
"""
Returns the full set of contexts for this schema.
:return {<str> context name: <dict> context, ..}
"""
return self._contexts
def databaseName(self):
"""
Returns the name of the database that this schema will be linked to.
:return <str>
"""
if self._databaseName:
return self._databaseName
else:
try:
return self.group().databaseName()
except AttributeError:
return ''
def database(self):
"""
Returns the database that is linked with the current schema.
:return <Database> || None
"""
if self._database is not None:
return self._database
dbname = self.databaseName()
if dbname:
return orb.system.database(dbname)
return orb.system.database()
def defaultColumns(self):
    """
    Returns the columns queried by default for this table, i.e. all
    columns not flagged IgnoreByDefault.

    :return [<orb.Column>, ..]
    """
    ignored = orb.Column.Flags.IgnoreByDefault
    return [column for column in self.columns() if not column.testFlag(ignored)]
def defaultOrder(self):
"""
Returns the default order to be used when querying this schema.
:return [(<str> columnName, <str> asc|desc), ..] || None
"""
return self._defaultOrder
def displayName(self):
"""
Returns the display name for this table.
:return <str>
"""
if not self._displayName:
return projex.text.pretty(self.name())
return self._displayName
def fieldNames(self, recurse=True, flags=0, kind=0):
    """
    Returns the database field name of every column defined for this
    schema.

    :param recurse | <bool>
           flags | <orb.Column.Flags>
           kind | <orb.Column.Kind>

    :return [<str>, ..]
    """
    matched = self.columns(recurse=recurse, flags=flags, kind=kind)
    return [column.fieldName() for column in matched]
def generateModel(self):
    """
    Generates the default model class for this table schema, if no \
    default model already exists.  The new generated table will be \
    returned.

    :return <subclass of Table> || None
    """
    if self._model:
        return self._model
    # resolve the base class: the inherited schema's model when this
    # schema inherits, otherwise the system's base table type
    if self.inherits():
        inherits = self.inherits()
        inherited = orb.system.schema(inherits)
        if not inherited:
            raise orb.errors.TableNotFound(inherits)
        else:
            base = inherited.model(autoGenerate=True)
            if base:
                bases = [base]
            else:
                bases = [orb.system.baseTableType()]
    else:
        bases = [orb.system.baseTableType()]
    # generate the class attributes for the dynamic model
    attrs = {'__db_schema__': self, '__module__': 'orb.schema.dynamic'}
    grp = self.group()
    prefix = ''
    if grp:
        prefix = grp.modelPrefix()
    # generate the archive layer (a sibling <Name>Archive schema/model)
    if self.isArchived():
        # create the archive column
        archive_columns = []
        colname = projex.text.camelHump(self.name())
        # create a duplicate of the existing columns, disabling translations since we'll store
        # a single record per change
        found_locale = False
        for column in self.columns(recurse=False):
            # the archive table has its own id; skip the source's
            if column.name() == 'id':
                continue
            new_column = column.copy()
            new_column.setTranslatable(False)
            new_column.setUnique(False)
            archive_columns.append(new_column)
            if column.name() == 'locale':
                found_locale = True
        archive_columns += [
            # primary key for the archives is a reference to the article
            orb.Column(orb.ColumnType.ForeignKey,
                       colname,
                       fieldName='{0}_archived_id'.format(projex.text.underscore(self.name())),
                       required=True,
                       reference=self.name(),
                       reversed=True,
                       reversedName='archives'),
            # and its version
            orb.Column(orb.ColumnType.Integer,
                       'archiveNumber',
                       required=True),
            # created the archive at method
            orb.Column(orb.ColumnType.DatetimeWithTimezone,
                       'archivedAt',
                       default='now')
        ]
        # each (record, version) pair must be unique
        archive_indexes = [
            orb.Index('byRecordAndVersion', [colname, 'archiveNumber'], unique=True)
        ]
        # store data per locale when the source had no locale column
        if not found_locale:
            archive_columns.append(orb.Column(orb.ColumnType.String,
                                              'locale',
                                              fieldName='locale',
                                              required=True,
                                              maxLength=5))
        # create and register the new archive schema + model
        archive_name = '{0}Archive'.format(self.name())
        archive_schema = TableSchema()
        archive_schema.setDatabaseName(self.databaseName())
        archive_schema.setName(archive_name)
        archive_schema.setDbName('{0}_archives'.format(projex.text.underscore(self.name())))
        archive_schema.setColumns(archive_columns)
        archive_schema.setIndexes(archive_indexes)
        archive_schema.setAutoLocalize(self.autoLocalize())
        # archives of archives are never created
        archive_schema.setArchived(False)
        archive_schema.setDefaultOrder([('archiveNumber', 'asc')])
        # define the class properties
        class_data = {
            '__module__': 'orb.schema.dynamic',
            '__db_schema__': archive_schema
        }
        model = MetaTable(archive_name, tuple(bases), class_data)
        archive_schema.setModel(model)
        self.setArchiveModel(model)
        orb.system.registerSchema(archive_schema)
        setattr(dynamic, archive_name, model)
    # finally, create the new model and expose it on the dynamic module
    cls = MetaTable(prefix + self.name(), tuple(bases), attrs)
    setattr(dynamic, cls.__name__, cls)
    return cls
def generatePrimary(self):
    """
    Auto-generates the primary column for this schema based on the
    current settings.  Returns None in edit-only mode or when no
    backend is available to supply a default primary column.

    :return [<orb.Column>, ..] || None
    """
    if orb.system.settings().editOnlyMode():
        return None
    db = self.database()
    if not db or not db.backend():
        return None
    # delegate the column definition to the active backend
    return [db.backend().defaultPrimaryColumn()]
def group(self):
"""
Returns the schema group that this schema is related to.
:return <orb.TableGroup> || None
"""
if not self._group:
dbname = self._databaseName
grp = orb.system.group(self.groupName(), database=dbname)
self._group = grp
return self._group
def groupName(self):
"""
Returns the name of the group that this schema is a part of in the \
database.
:return <str>
"""
if self._group:
return self._group.name()
return self._groupName
def hasColumn(self, column, recurse=True, flags=0, kind=0):
    """
    Returns whether the given column is among this schema's columns
    for the given filter options.

    :return <bool>
    """
    matched = self.columns(recurse=recurse, flags=flags, kind=kind)
    return column in matched
def hasTranslations(self):
    """
    Returns whether any column of this schema is translatable.

    :return <bool>
    """
    return any(col.isTranslatable() for col in self.columns())
def indexes(self, recurse=True):
"""
Returns the list of indexes that are associated with this schema.
:return [<orb.Index>, ..]
"""
return self._indexes[:]
def inherits(self):
"""
Returns the name of the table schema that this class will inherit from.
:return <str>
"""
return self._inherits
def inheritsModel(self):
return orb.system.model(self.inherits()) if self.inherits() else None
def inheritsSchema(self):
return orb.system.schema(self.inherits()) if self.inherits() else None
def inheritsRecursive(self):
"""
Returns all of the tables that this table inherits from.
:return [<str>, ..]
"""
output = []
inherits = self.inherits()
while inherits:
output.append(inherits)
table = orb.system.schema(inherits)
if not table:
break
inherits = table.inherits()
return output
def isAbstract(self):
"""
Returns whether or not this schema is an abstract table. Abstract \
tables will not register to the database, but will serve as base \
classes for inherited tables.
:return <bool>
"""
return self._abstract
def isArchived(self):
"""
Returns whether or not this schema is archived. Archived schema's will store additional records
each time a record is created or updated for historical reference.
:return <bool>
"""
return self._archived
def isCacheEnabled(self):
"""
Returns whether or not caching is enabled for this Table instance.
:sa setCacheEnabled
:return <bool>
"""
return self._cacheEnabled
def isReferenced(self):
"""
Returns whether or not this schema is referenced from an external file.
:return <bool>
"""
return self._referenced
def model(self, autoGenerate=False):
"""
Returns the default Table class that is associated with this \
schema instance.
:param autoGenerate | <bool>
:return <subclass of Table>
"""
if self._model is None and autoGenerate:
self._model = self.generateModel()
return self._model
def name(self):
"""
Returns the name of this schema object.
:return <str>
"""
return self._name
def namespace(self):
"""
Returns the namespace of this schema object. If no namespace is
defined, then its group namespace is utilized.
:return <str>
"""
if self._namespace:
return self._namespace
grp = self.group()
if grp:
return grp.namespace()
else:
db = self.database()
if db:
return db.namespace()
return orb.system.namespace()
def polymorphicColumn(self):
    """
    Returns the first column that defines the polymorphic table type
    for this table, if any.

    :return <orb.Column> || None
    """
    return next((column for column in self.columns()
                 if column.isPolymorphic()), None)
def pipe(self, name):
"""
Returns the pipe that matches the inputted name.
:return <orb.Pipe> || None
"""
for pipe in self._pipes:
if pipe.name() == name:
return pipe
return None
def pipes(self, recurse=True):
"""
Returns a list of the pipes for this instance.
:return [<orb.Pipe>, ..]
"""
return self._pipes[:]
def preloadCache(self):
"""
Returns whether or not this cache will be preloaded into memory.
:return <bool>
"""
return self._preloadCache
def primaryColumn(self):
"""
Returns the primary column for this table. If this table has no or multiple primary columns defined,
then a PrimaryKeyNotDefined will be raised, otherwise the first column will be returned.
:return <orb.Column>
"""
cols = self.primaryColumns()
if len(cols) == 1:
return cols[0]
else:
raise orb.PrimaryKeyNotDefined(self)
def primaryColumns(self, db=None):
    """
    Returns the primary key columns for this table's
    schema.  The result is generated once and cached.

    :param db | unused -- kept for backwards compatibility

    :return <tuple> (<orb.Column>,)
    """
    # cached after the first successful generation
    if self._primaryColumns:
        return self._primaryColumns
    # auto-generate primary columns only for root (non-inheriting) schemas
    if self.autoPrimary() and not self._inherits:
        cols = self.generatePrimary()
        if cols is not None:
            for col in cols:
                self.addColumn(col)
    else:
        cols = None
    # inheriting schemas reuse the primary columns of their ancestor
    if cols is None and self._inherits:
        inherited = orb.system.schema(self._inherits)
        if not inherited:
            raise errors.TableNotFound(self._inherits)
        else:
            cols = inherited.primaryColumns()
    if cols is not None:
        # store a copy so later mutation of `cols` cannot affect the cache
        self._primaryColumns = cols[:]
        return self._primaryColumns
    else:
        return []
def property(self, key, default=None):
    """
    Returns the custom data stored on this table at the given key,
    or the default when the key is unknown.

    :param key | <str>
           default | <variant>

    :return <variant>
    """
    try:
        return self._properties[nstr(key)]
    except KeyError:
        return default
def removeColumn(self, column):
"""
Removes the inputted column from this table's schema.
:param column | <orb.Column>
"""
try:
self._columns.remove(column)
column._schema = None
except KeyError:
pass
def removeIndex(self, index):
"""
Removes the inputted index from this table's schema.
:param index | <orb.Index>
"""
try:
self._indexes.remove(index)
except ValueError:
pass
def removePipe(self, pipe):
"""
Removes the inputted pipe from this table's schema.
:param pipe | <orb.Pipe>
"""
try:
self._pipes.remove(pipe)
except ValueError:
pass
def removeValidator(self, validator):
"""
Removes a record validators that are associated with this schema. You
can define different validation addons for a table's schema that will process when
calling the Table.validateRecord method.
:param validator | <orb.AbstractRecordValidator>
"""
try:
self._validators.remove(validator)
except ValueError:
pass
def reverseLookup(self, name):
    """
    Returns the reverse-lookup column matching the given name, i.e.
    a reversed foreign-key column on another schema that references
    this one.

    :return <orb.Column> || None
    """
    # build the full mapping first so duplicate reversed names resolve
    # the same way across calls (last registration wins)
    matches = {}
    for schema in orb.system.schemas():
        for column in schema.columns():
            if column.reference() == self.name() and column.isReversed():
                matches[column.reversedName()] = column
    return matches.get(name)
def reverseLookups(self):
    """
    Returns every reverse-lookup column that references this schema.

    :return [<orb.Column>, ..]
    """
    output = []
    for schema in orb.system.schemas():
        for column in schema.columns():
            if column.reference() == self.name() and column.isReversed():
                output.append(column)
    return output
def searchEngine(self):
"""
Returns the search engine that will be used for this system.
:return <orb.SearchEngine>
"""
if self._searchEngine:
return self._searchEngine
else:
return orb.system.searchEngine()
def searchableColumns(self, recurse=True, flags=0, kind=0):
    """
    Returns the names of the searchable columns for this schema,
    including any additional search columns.

    :return [<str>, ..]
    """
    search_flags = flags | orb.Column.Flags.Searchable
    names = self.columnNames(recurse=recurse, flags=search_flags, kind=kind)
    return names + self.additionalSearchColumns()
def setAbstract(self, state):
"""
Sets whether or not this table is abstract.
:param state | <bool>
"""
self._abstract = state
def setAdditionalSearchColumns(self, columns):
"""
Sets a list of additional columns to use during search.
:param columns | [<str>, ..]
"""
self._additionalSearchColumns = columns
def setAutoLocalize(self, state):
"""
Sets whether or not this schema will automatically filter our localized results. This
will filter records based on the locale lookup if a `locale` column is defined.
:param state | <bool>
"""
self._autoLocalize = state
def setAutoPrimary(self, state):
"""
Sets whether or not this schema will use auto-generated primary keys.
:sa autoPrimary
:return <bool>
"""
self._autoPrimary = state
def setArchived(self, state=True):
"""
Sets the archive state for this schema.
:param state | <bool>
"""
self._archived = state
def setArchiveModel(self, model):
self._archiveModel = model
def setCache(self, cache):
"""
Sets the table cache for this instance.
:param cache | <orb.caching.TableCache>
"""
self._cache = cache
def setCacheEnabled(self, state):
"""
Sets whether or not to enable caching on the Table instance this schema
belongs to. When caching is enabled, all the records from the table
database are selected the first time a select is called and
then subsequent calls to the database are handled by checking what is
cached in memory. This is useful for small tables that don't change
often (such as a Status or Type table) and are referenced frequently.
To have the cache clear automatically after a number of minutes, set the
cacheTimeout method.
:param state | <bool>
"""
self._cacheEnabled = state
if self._cache:
self._cache.setEnabled(state)
def setCacheTimeout(self, seconds):
"""
Sets the number of seconds that the table should clear its cached
results from memory and re-query the database. If the value is 0, then
the cache will have to be manually cleared.
:param seconds | <int>
"""
self._cacheTimeout = seconds
def setColumns(self, columns):
"""
Sets the columns that this schema uses.
:param columns | [<orb.Column>, ..]
"""
self._columns = set(columns)
# pylint: disable-msg=W0212
for column in columns:
if not column._schema:
column._schema = self
def setContext(self, name, context):
"""
Sets the context for this schema. This will define different override options when a particular
call is made to the table within a given context.
:param name | <str>
context | <dict>
"""
self._contexts[name] = dict(context)
def setContexts(self, contexts):
"""
Sets the full context set for this table to the inputted dictionary of contexts.
:param contexts | {<str> context name: <dict>, ..}
"""
self._contexts = dict(contexts)
def setDefaultOrder(self, order):
"""
Sets the default order for this schema to the inputted order. This
will be used when an individual query for this schema does not specify
an order explicitly.
:param order | [(<str> columnName, <str> asc|desc), ..] || None
"""
self._defaultOrder = order
def setProperty(self, key, value):
    """
    Stores custom data on this schema under the given key.

    :param key | <str>
           value | <variant>
    """
    self._properties[nstr(key)] = value
def setDatabase(self, database):
    """
    Links this schema to a database, either directly to a Database
    instance or by name.

    :param database | <orb.Database> || <str> || None
    """
    if isinstance(database, orb.Database):
        self._database = database
        return
    # a name (or None) clears the direct link and stores the name
    self._database = None
    if database is not None:
        self.setDatabaseName(database)
def setDatabaseName(self, databaseName):
    """
    Sets the name of the database this schema is linked to.

    :param databaseName | <str>
    """
    self._databaseName = nstr(databaseName)
def setDisplayName(self, name):
"""
Sets the display name for this table.
:param name | <str>
"""
self._displayName = name
def setModel(self, model):
"""
Sets the default Table class that is associated with this \
schema instance.
:param model | <subclass of Table>
"""
self._model = model
def setNamespace(self, namespace):
"""
Sets the namespace that will be used for this schema to the inputted
namespace.
:param namespace | <str>
"""
self._namespace = namespace
def setIndexes(self, indexes):
"""
Sets the list of indexed lookups for this schema to the inputted list.
:param indexes | [<orb.Index>, ..]
"""
self._indexes = indexes[:]
for index in self._indexes:
index.setSchema(self)
def setInherits(self, name):
"""
Sets the name for the inherited table schema to the inputted name.
:param name | <str>
"""
self._inherits = name
def setName(self, name):
"""
Sets the name of this schema object to the inputted name.
:param name | <str>
"""
self._name = name
def setGroup(self, group):
"""
Sets the group association for this schema to the inputted group.
:param group | <orb.TableGroup>
"""
self._group = group
self._groupName = group.name() if group else ''
def setGroupName(self, groupName):
"""
Sets the group name that this table schema will be apart of.
:param groupName | <str>
"""
self._groupName = groupName
def setPipes(self, pipes):
"""
Sets the pipe methods that will be used for this schema.
:param pipes | [<orb.Pipes>, ..]
"""
self._pipes = pipes
def setPreloadCache(self, state):
"""
Sets whether or not to preload all records for this table as
a cache.
:param state | <bool>
"""
self._preloadCache = state
def setSearchEngine(self, engine):
"""
Sets the search engine that will be used for this system.
:param engine | <orb.SearchEngine>
"""
self._searchEngine = engine
def setStringFormat(self, text):
    """
    Sets the string format used when rendering a record with str().
    This is a python format string keyed by column values.

    :param text | <str>
    """
    self._stringFormat = nstr(text)
def setDbName(self, dbname):
"""
Sets the name that will be used in the actual database. If the \
name supplied is blank, then the default database name will be \
used based on the group and name for this schema.
:param dbname | <str>
"""
self._dbname = dbname
def setTimezone(self, timezone):
"""
Sets the timezone associated directly to this database.
:sa <orb.Orb.setTimezone>
:param timezone | <pytz.tzfile> || None
"""
self._timezone = timezone
def setValidators(self, validators):
"""
Sets the list of the record validators that are associated with this schema. You
can define different validation addons for a table's schema that will process when
calling the Table.validateRecord method.
:param validators | [<orb.AbstractRecordValidator>, ..]
"""
self._validators = validators
def setView(self, name, view):
"""
Adds a new view to this schema. Views provide pre-built dynamically joined tables that can
give additional information to a table.
:param name | <str>
view | <orb.View>
"""
self._views[name] = view
def stringFormat(self):
"""
Returns the string format style for this schema.
:return <str>
"""
return self._stringFormat
def dbname(self):
"""
Returns the name that will be used for the table in the database.
:return <str>
"""
if not self._dbname:
grp = self.group()
prefix = grp.modelPrefix() if grp and grp.useModelPrefix() else ''
self._dbname = self.defaultDbName(self.name(), prefix)
return self._dbname
def timezone(self, options=None):
"""
Returns the timezone associated specifically with this database. If
no timezone is directly associated, then it will return the timezone
that is associated with the Orb system in general.
:sa <orb.Orb>
:param options | <orb.ContextOptions>
:return <pytz.tzfile> || None
"""
if self._timezone is None:
return self.database().timezone(options)
return self._timezone
def toolTip(self, context='normal'):
    """
    Returns an HTML tooltip describing this schema and basic API usage.

    :param context | <str> | currently unused

    :return <str>
    """
    # NOTE(review): the template's internal whitespace is part of the
    # rendered output -- do not re-indent these lines
    tip = '''\
<b>{name} <small>(Table from {group} group)</small></b><br>
<em>Usage</em>
<pre>
>>> # api usage
>>> record = {name}()
>>> record.commit()
>>> all_records = {name}.all()
>>> some_records = {name}.select(where=query)
>>> # meta data
>>> schema = {name}.schema()
>>> # ui display info
>>> schema.displayName()
'{display}'
>>> # database table info
>>> schema.databaseName()
'{db_name}'
</pre>'''
    return tip.format(name=self.name(),
                      group=self.groupName(),
                      db_name=self.databaseName(),
                      display=self.displayName())
def toXml(self, xparent=None):
    """
    Saves this schema information to XML.  Only values differing from
    their defaults are written as attributes.

    :param xparent | <xml.etree.ElementTree.Element>

    :return <xml.etree.ElementTree.Element>
    """
    if xparent is not None:
        xschema = ElementTree.SubElement(xparent, 'schema')
    else:
        xschema = ElementTree.Element('schema')
    # save the attributes -- non-default values only
    xschema.set('name', self.name())
    if self.displayName() != projex.text.pretty(self.name()):
        xschema.set('displayName', self.displayName())
    if self.inherits():
        xschema.set('inherits', self.inherits())
    if self.dbname() != projex.text.underscore(projex.text.pluralize(self.name())):
        xschema.set('dbname', self.dbname())
    if not self.autoPrimary():
        xschema.set('autoPrimary', nstr(self.autoPrimary()))
    if self.autoLocalize():
        xschema.set('autoLocalize', str(self.autoLocalize()))
    if self.isCacheEnabled():
        xschema.set('cacheEnabled', nstr(self.isCacheEnabled()))
        xschema.set('cacheTimeout', nstr(self._cacheTimeout))
        xschema.set('preloadCache', nstr(self.preloadCache()))
    if self.stringFormat():
        xschema.set('stringFormat', self.stringFormat())
    if self.isArchived():
        xschema.set('archived', nstr(self.isArchived()))
    # save the custom key/value properties
    if self._properties:
        xprops = ElementTree.SubElement(xschema, 'properties')
        for prop, value in sorted(self._properties.items()):
            xprop = ElementTree.SubElement(xprops, 'property')
            xprop.set('key', nstr(prop))
            xprop.set('value', nstr(value))
    # save the columns (locally defined only; inherited columns are
    # serialized by their own schema)
    for column in sorted(self.columns(recurse=False),
                         key=lambda x: x.name()):
        column.toXml(xschema)
    # save the indexes
    for index in sorted(self.indexes(), key=lambda x: x.name()):
        index.toXml(xschema)
    # save the pipes
    for pipe in sorted(self.pipes(), key=lambda x: x.name()):
        pipe.toXml(xschema)
    return xschema
def validators(self):
"""
Returns a list of the record validators that are associated with this schema. You
can define different validation addons for a table's schema that will process when
calling the Table.validateRecord method.
:return [<orb.AbstractRecordValidator>, ..]
"""
return self._validators
def view(self, name):
"""
Returns the view for this schema that matches the given name.
:return <orb.View> || None
"""
return self._views.get(name)
@staticmethod
def defaultDbName(name, prefix=''):
    """
    Returns the default database table name for the given schema name
    and optional prefix, rendered from the class templates.

    :param name | <str>
    :param prefix | <str>
    """
    templ = TableSchema.TEMPLATE_PREFIXED if prefix else TableSchema.TEMPLATE_TABLE
    return projex.text.render(templ, {'name': name, 'prefix': prefix})
@staticmethod
def fromXml(xschema, referenced=False):
    """
    Generates a new table schema instance for the inputted database schema \
    based on the given xml information.

    :param xschema | <xml.etree.Element>
           referenced | <bool> | mark the schema as externally referenced

    :return <TableSchema> || None
    """
    tschema = orb.TableSchema(referenced=referenced)
    # load the simple attributes (missing attributes fall back to defaults)
    tschema.setName(xschema.get('name', ''))
    tschema.setDisplayName(xschema.get('displayName', ''))
    tschema.setGroupName(xschema.get('group', ''))
    tschema.setInherits(xschema.get('inherits', ''))
    tschema.setDbName(xschema.get('dbname', ''))
    # autoPrimary defaults to True unless explicitly 'False'
    tschema.setAutoPrimary(xschema.get('autoPrimary') != 'False')
    tschema.setAutoLocalize(xschema.get('autoLocalize') == 'True')
    tschema.setStringFormat(xschema.get('stringFormat', ''))
    tschema.setCacheEnabled(xschema.get('cacheEnabled') == 'True')
    # support the old cacheExpire parameter which stored the timeout in minutes.
    # as of 4.4.0, all the timeouts will be stored as seconds
    tschema.setCacheTimeout(int(xschema.get('cacheTimeout', int(xschema.get('cacheExpire', 0)) * 60)))
    tschema.setPreloadCache(xschema.get('preloadCache') == 'True')
    tschema.setArchived(xschema.get('archived') == 'True')
    # load the custom key/value properties
    xprops = xschema.find('properties')
    if xprops is not None:
        for xprop in xprops:
            tschema.setProperty(xprop.get('key'), xprop.get('value'))
    # load the columns
    for xcolumn in xschema.findall('column'):
        column = orb.Column.fromXml(xcolumn, referenced)
        if column:
            tschema.addColumn(column)
    # load the indexes
    for xindex in xschema.findall('index'):
        index = orb.Index.fromXml(xindex, referenced)
        if index:
            tschema.addIndex(index)
    # load the pipes
    for xpipe in xschema.findall('pipe'):
        pipe = orb.Pipe.fromXml(xpipe, referenced)
        if pipe:
            tschema.addPipe(pipe)
    return tschema
import re
from orb import errors
class AbstractColumnValidator(object):
    """
    Interface for column-level validators.  Subclasses must implement
    validate().
    """
    def validate(self, column, value):
        """
        Validates the inputted value for this instance.

        :param column | <orb.Column>
               value | <variant>

        :return <bool> | is valid
        """
        # BUGFIX: was `raise NotImplemented` -- NotImplemented is a
        # sentinel constant, not an exception, so raising it produces a
        # TypeError instead of the intended abstract-method error
        raise NotImplementedError
# -----------------------------------------------------------
class AbstractRecordValidator(object):
    """
    Interface for record-level validators.  Subclasses must implement
    validate().
    """
    # noinspection PyMethodMayBeStatic
    def validate(self, record, values):
        """
        Validates the record against the inputted dictionary of column
        values.

        :param record | <orb.Table>
               values | {<orb.Column>: <value>, ..}

        :return <bool>
        """
        # BUGFIX: was `raise NotImplemented` -- NotImplemented is a
        # sentinel constant, not an exception, so raising it produces a
        # TypeError instead of the intended abstract-method error
        raise NotImplementedError
#-----------------------------------------------------------
class RegexValidator(AbstractColumnValidator):
    """
    Column validator that accepts a value only when it matches a
    configurable regular expression.
    """
    def __init__(self):
        super(RegexValidator, self).__init__()

        # define extra properties
        self._expression = ''   # regular expression the value must match
        self._help = ''         # custom message shown on validation failure

    def expression(self):
        """
        Returns the regular expression for the inputted text.

        :return <str>
        """
        return self._expression

    def help(self):
        """
        Returns the help expression that will be shown to when this validator fails.

        :return <str>
        """
        return self._help

    def setExpression(self, expression):
        """
        Sets the regular expression to the inputted text.

        :param expression | <str>
        """
        self._expression = expression

    def setHelp(self, text):
        """
        Defines the help text that will be raised when processing this validator.

        :param text | <str>
        """
        self._help = text

    def validate(self, column, value):
        """
        Validates the inputted value against this validator expression.

        :param column | <orb.Column>
               value | <variant>

        :return <bool> | is valid

        :raises <orb.errors.ColumnValidationError> when the expression is
                invalid or the value does not match
        """
        try:
            valid = re.match(self.expression(), value) is not None
        # BUGFIX: was `except StandardError` -- StandardError only exists
        # in Python 2; under Python 3 it raises a NameError that masks the
        # real failure.  Exception covers the same cases in both versions.
        except Exception:
            msg = 'Invalid validator expression: {0}'.format(self.expression())
            raise errors.ColumnValidationError(column, msg=msg)
        else:
            if not valid:
                msg = 'Invalid value for {0}, needs to match {1}'.format(column.name(), self.expression())
                raise errors.ColumnValidationError(column, msg=self.help() or msg)
            return True
#-----------------------------------------------------------
class RequiredValidator(AbstractColumnValidator):
    """
    Column validator that rejects NULL-ish values (None or the empty
    string).
    """
    def validate(self, column, value):
        """
        Verifies that the inputted value is not a NULL value.

        :param column | <orb.Column>
               value | <variant>

        :return <bool>

        :raises <orb.errors.ColumnValidationError> when the value is empty
        """
        if value is not None and value != '':
            return True
        msg = '{0} is required.'.format(column.name())
        raise errors.ColumnValidationError(column, msg=msg)

Sorry, the diff of this file is too big to display

""" Defines the meta information for a View class. """
import logging
import projex.text
from projex.lazymodule import lazy_import
from projex.text import nativestring as nstr
from xml.etree import ElementTree
from .meta.metaview import MetaView
from . import dynamic
log = logging.getLogger(__name__)
orb = lazy_import('orb')
errors = lazy_import('orb.errors')
class ViewSchema(object):
"""
Contains meta data information about a view as it maps to a database.
"""
TEMPLATE_PREFIXED = '[prefix::underscore::lower]_[name::underscore::lower]'
TEMPLATE_VIEW = '[name::underscore::lower]'
_customHandlers = []
def __cmp__(self, other):
# check to see if this is the same instance
if id(self) == id(other):
return 0
# make sure this instance is a valid one for the other kind
if not isinstance(other, ViewSchema):
return -1
# compare inheritance level
my_ancestry = self.ancestry()
other_ancestry = other.ancestry()
result = cmp(len(my_ancestry), len(other_ancestry))
if not result:
# compare groups
my_group = self.group()
other_group = other.group()
if my_group is not None and other_group is not None:
result = cmp(my_group.order(), other_group.order())
elif my_group:
result = -1
else:
result = 1
if not result:
return cmp(self.name(), other.name())
return result
return result
def __init__(self, referenced=False):
    """
    Initializes the schema's internal state to empty defaults.

    :param referenced | <bool> | whether this schema was loaded from an
                                 external, referenced file
    """
    # general schema options
    self._abstract = False
    self._autoPrimary = True
    # naming / identification
    self._name = ''
    self._databaseName = ''
    self._groupName = ''
    self._dbname = ''
    self._inherits = ''
    self._stringFormat = ''
    self._namespace = ''
    self._displayName = ''
    self._inheritedLoaded = False
    # caching configuration
    self._cache = None
    self._cacheTimeout = 0
    self._cacheEnabled = False
    self._preloadCache = False
    self._static = False
    # schema members
    self._columns = set()
    self._columnIndex = {}  # hash of lookup args -> cached column() result
    self._views = {}
    self._properties = {}
    self._indexes = []
    self._pipes = []
    self._contexts = {}
    # environment / linkage
    self._database = None
    self._timezone = None
    self._defaultOrder = None
    self._group = None
    self._primaryColumns = None
    self._model = None
    self._archiveModel = None
    self._referenced = referenced
    self._searchEngine = None
    self._validators = []
    self._archived = False
def addColumn(self, column):
"""
Adds the inputted column to this view schema.
:param column | <orb.Column>
"""
self._columns.add(column)
if not column._schema:
column._schema = self
def addIndex(self, index):
"""
Adds the inputted index to this view schema.
:param index | <orb.Index>
"""
if index in self._indexes:
return
index.setSchema(self)
self._indexes.append(index)
def addPipe(self, pipe):
"""
Adds the inputted pipe reference to this view schema.
:param pipe | <orb.Pipe>
"""
if pipe in self._pipes:
return
pipe.setSchema(self)
self._pipes.append(pipe)
def addValidator(self, validator):
"""
Adds a record validators that are associated with this schema. You
can define different validation addons for a view's schema that will process when
calling the View.validateRecord method.
:param validator | <orb.AbstractRecordValidator>
"""
self._validators.append(validator)
def ancestor(self):
"""
Returns the direct ancestor for this schema that it inherits from.
:return <ViewSchema> || None
"""
if self.inherits():
return orb.system.schema(self.inherits())
return None
def ancestry(self):
"""
Returns the different inherited schemas for this instance.
:return [<ViewSchema>, ..]
"""
if not self._inherits:
return []
schema = orb.system.schema(self.inherits())
if not schema:
return []
return schema.ancestry() + [schema]
def ancestryQuery(self):
"""
Returns the query that will link this schema to its ancestors.
:return <Query> || <QueryCompound>
"""
if not self.inherits():
return None
from orb import Query as Q
query = Q()
ancestry = self.ancestry()
schema = self
ifield_templ = orb.system.settings().inheritField()
for ancest in reversed(ancestry):
afield = ifield_templ.format(view=ancest.databaseName())
sfield = ifield_templ.format(view=schema.databaseName())
if ancest.inherits():
query &= Q(ancest.model(), afield) == Q(schema.model(), sfield)
else:
query &= Q(ancest.model()) == Q(schema.model(), sfield)
schema = ancest
return query
def ancestryModels(self):
"""
Returns the ancestry models for this schema.
:return [<subclass of orb.View>, ..]
"""
return [x.model() for x in self.ancestry()]
def archiveModel(self):
return self._archiveModel
def autoPrimary(self):
"""
Returns whether or not this schema auto-generates the primary key. \
This is useful when defining reusable schemas that could be applied \
to various backends, for instance an auto-increment column for a \
PostgreSQL database vs. a string column for a MongoDB. By default, it \
is recommended this value remain True.
:return <bool>
"""
return self._autoPrimary
def baseAncestor(self):
"""
Returns the base ancestor for this schema.
:return <orb.ViewSchema>
"""
if not self.inherits():
return self
ancestry = self.ancestry()
if ancestry:
return ancestry[0]
return self
def cache(self):
"""
Returns the table cache associated with this schema.
:return <orb.caching.TableCache>
"""
return self._cache or self.database().cache()
def cacheTimeout(self):
"""
Returns the number of seconds that the caching system will use before
clearing its cache data.
:return <int> | seconds
"""
return self._cacheTimeout
def column(self, name, recurse=True, flags=0, kind=0):
    """
    Returns the column instance based on its name.
    If error reporting is on, then the ColumnNotFound
    error will be thrown the key inputted is not a valid
    column name.

    :param name    | <str> | supports dotted paths ('ref.column') which
                             are resolved through referenced schemas
           recurse | <bool>
           flags   | <int>
           kind    | <int>

    :return <orb.Column> || None
    """
    key = (name, recurse, flags, kind)
    # NOTE(review): the cache is keyed on hash(key) rather than on the
    # tuple itself -- a hash collision between two different lookups
    # would return the wrong cached column; using the tuple directly as
    # the dict key would be collision-free
    key = hash(key)

    # lookup the existing cached record (failed lookups are cached as
    # None as well, so a miss here means "never looked up")
    try:
        return self._columnIndex[key]
    except KeyError:
        pass

    # lookup the new record
    parts = nstr(name).split('.')
    part = parts[0]
    next_parts = parts[1:]

    # find the first column matching the leading path segment
    found = None
    for column in self.columns(recurse=recurse, flags=flags, kind=kind):
        if column.isMatch(part):
            found = column
            break

    # walk the remainder of a dotted path through the referenced schema;
    # a path segment on a non-reference column resolves to None
    if found is not None and next_parts:
        refmodel = found.referenceModel()
        if refmodel:
            refschema = refmodel.schema()
            next_part = '.'.join(next_parts)
            found = refschema.column(next_part, recurse=recurse, flags=flags, kind=kind)
        else:
            found = None

    # cache this lookup for the future
    self._columnIndex[key] = found
    return found
def columnNames(self, recurse=True, flags=0, kind=0):
"""
Returns the list of column names that are defined for
this view schema instance.
:return <list> [ <str> columnName, .. ]
"""
return sorted([x.name() for x in self.columns(recurse=recurse, flags=flags, kind=kind)])
def columns(self, recurse=True, flags=0, kind=0):
"""
Returns the list of column instances that are defined
for this view schema instance.
:param recurse | <bool>
flags | <orb.Column.Flags>
kind | <orb.Column.Kind>
:return <list> [ <orb.Column>, .. ]
"""
# generate the primary columns for this schema
output = {column for column in self._columns if
(not flags or column.testFlag(flags)) and
(not kind or column.isKind(kind))}
if kind & orb.Column.Kind.Proxy and self._model:
output.update(self._model.proxyColumns())
return list(output)
def context(self, name):
"""
Returns the context for this schema. This will define different override options when a particular
call is made to the view within a given context.
:param name | <str>
:return <dict>
"""
return self._contexts.get(name, {})
def contexts(self):
"""
Returns the full set of contexts for this schema.
:return {<str> context name: <dict> context, ..}
"""
return self._contexts
def databaseName(self):
"""
Returns the name of the database that this schema will be linked to.
:return <str>
"""
if self._databaseName:
return self._databaseName
else:
try:
return self.group().databaseName()
except AttributeError:
return ''
def database(self):
"""
Returns the database that is linked with the current schema.
:return <Database> || None
"""
if self._database is not None:
return self._database
dbname = self.databaseName()
if dbname:
return orb.system.database(dbname)
return orb.system.database()
def defaultColumns(self):
"""
Returns the columns that should be used by default when querying for
this view.
:return [<orb.Column>, ..]
"""
flag = orb.Column.Flags.IgnoreByDefault
return [col for col in self.columns() if not col.testFlag(flag)]
def defaultOrder(self):
"""
Returns the default order to be used when querying this schema.
:return [(<str> columnName, <str> asc|desc), ..] || None
"""
return self._defaultOrder
def displayName(self):
"""
Returns the display name for this view.
:return <str>
"""
if not self._displayName:
return projex.text.pretty(self.name())
return self._displayName
def fieldNames(self, recurse=True, flags=0, kind=0):
"""
Returns the list of column instances that are defined
for this view schema instance.
:param recurse | <bool>
:return <list> [ <str>, .. ]
"""
return [col.fieldName() for col in self.columns(recurse=recurse, flags=flags, kind=kind)]
def generateModel(self):
"""
Generates the default model class for this view schema, if no \
default model already exists. The new generated view will be \
returned.
:return <subclass of View> || None
"""
if self._model:
return self._model
# generate the base models
if self.inherits():
inherits = self.inherits()
inherited = orb.system.schema(inherits)
if not inherited:
log.error('Could not find inherited model: %s', inherits)
base = None
else:
base = inherited.model(autoGenerate=True)
if base:
bases = [base]
else:
bases = [orb.system.baseViewType()]
else:
bases = [orb.system.baseViewType()]
# generate the attributes
attrs = {'__db_schema__': self, '__module__': 'orb.schema.dynamic'}
grp = self.group()
prefix = ''
if grp:
prefix = grp.modelPrefix()
cls = MetaView(prefix + self.name(), tuple(bases), attrs)
setattr(dynamic, cls.__name__, cls)
# generate archive layer
if self.isArchived():
# create the archive column
archive_columns = []
# create a duplicate of the existing columns, disabling translations since we'll store
# a single record per change
found_locale = False
for column in self.columns(recurse=False, flags=~orb.Column.Flags.Primary):
new_column = column.copy()
new_column.setTranslatable(False)
archive_columns.append(new_column)
if column.name() == 'locale':
found_locale = True
archive_columns += [
# primary key for the archives is a reference to the article
orb.Column(orb.ColumnType.ForeignKey,
self.name(),
fieldName='{0}_archived_id'.format(projex.text.underscore(self.name())),
required=True,
reference=self.name(),
reversed=True,
reversedName='archives'),
# and its version
orb.Column(orb.ColumnType.Integer,
'archiveNumber',
required=True),
# created the archive at method
orb.Column(orb.ColumnType.DatetimeWithTimezone,
'archivedAt',
default='now')
]
# store data per locale
if not found_locale:
archive_columns.append(orb.Column(orb.ColumnType.String,
'locale',
fieldName='locale',
required=True,
maxLength=5))
archive_data = {
'__db__': self.databaseName(),
'__db_group__': self.groupName(),
'__db_name__': '{0}Archive'.format(self.name()),
'__db_dbname__': '{0}_archives'.format(projex.text.underscore(self.name())),
'__db_columns__': archive_columns,
'__db_pipes__': [],
'__db_schema__': None,
'__db_abstract__': False,
'__db_inherits__': None,
'__db_autoprimary__': True,
'__db_archived__': False
}
archive_class = MetaView(archive_data['__db_name__'], (orb.View,), archive_data)
archive_schema = archive_class.schema()
archive_schema.setDefaultOrder([('archiveNumber', 'asc')])
self.setArchiveModel(archive_class)
setattr(dynamic, archive_class.__name__, archive_class)
return cls
def generatePrimary(self):
"""
Auto-generates the primary column for this schema based on the \
current settings.
:return [<orb.Column>, ..] || None
"""
if orb.system.settings().editOnlyMode():
return None
db = self.database()
if not (db and db.backend()):
return None
# create the default primary column from the inputted type
return [db.backend().defaultPrimaryColumn()]
def group(self):
"""
Returns the schema group that this schema is related to.
:return <orb.ViewGroup> || None
"""
if not self._group:
dbname = self._databaseName
grp = orb.system.group(self.groupName(), database=dbname)
self._group = grp
return self._group
def groupName(self):
"""
Returns the name of the group that this schema is a part of in the \
database.
:return <str>
"""
if self._group:
return self._group.name()
return self._groupName
def hasColumn(self, column, recurse=True, flags=0, kind=0):
"""
Returns whether or not this column exists within the list of columns
for this schema.
:return <bool>
"""
return column in self.columns(recurse=recurse, flags=flags, kind=kind)
def hasTranslations(self):
for col in self.columns():
if col.isTranslatable():
return True
return False
def indexes(self, recurse=True):
"""
Returns the list of indexes that are associated with this schema.
:return [<orb.Index>, ..]
"""
return self._indexes[:]
def inherits(self):
"""
Returns the name of the view schema that this class will inherit from.
:return <str>
"""
return self._inherits
def inheritsModel(self):
if not self.inherits():
return None
return orb.system.model(self.inherits())
def inheritsRecursive(self):
"""
Returns all of the views that this view inherits from.
:return [<str>, ..]
"""
output = []
inherits = self.inherits()
while inherits:
output.append(inherits)
view = orb.system.schema(inherits)
if not view:
break
inherits = view.inherits()
return output
def isAbstract(self):
"""
Returns whether or not this schema is an abstract view. Abstract \
views will not register to the database, but will serve as base \
classes for inherited views.
:return <bool>
"""
return self._abstract
def isArchived(self):
"""
Returns whether or not this schema is archived. Archived schema's will store additional records
each time a record is created or updated for historical reference.
:return <bool>
"""
return self._archived
def isCacheEnabled(self):
"""
Returns whether or not caching is enabled for this View instance.
:sa setCacheEnabled
:return <bool>
"""
return self._cacheEnabled
def isReferenced(self):
"""
Returns whether or not this schema is referenced from an external file.
:return <bool>
"""
return self._referenced
def isStatic(self):
return self._static
def model(self, autoGenerate=False):
"""
Returns the default View class that is associated with this \
schema instance.
:param autoGenerate | <bool>
:return <subclass of View>
"""
if self._model is None and autoGenerate:
self._model = self.generateModel()
return self._model
def name(self):
"""
Returns the name of this schema object.
:return <str>
"""
return self._name
def namespace(self):
"""
Returns the namespace of this schema object. If no namespace is
defined, then its group namespace is utilized.
:return <str>
"""
if self._namespace:
return self._namespace
grp = self.group()
if grp:
return grp.namespace()
else:
db = self.database()
if db:
return db.namespace()
return orb.system.namespace()
def polymorphicColumn(self):
"""
Returns the first column that defines the polymorphic view type
for this view.
:return <orb.Column> || None
"""
for column in self.columns():
if column.isPolymorphic():
return column
return None
def pipe(self, name):
"""
Returns the pipe that matches the inputted name.
:return <orb.Pipe> || None
"""
for pipe in self._pipes:
if pipe.name() == name:
return pipe
return None
def pipes(self, recurse=True):
"""
Returns a list of the pipes for this instance.
:return [<orb.Pipe>, ..]
"""
return self._pipes[:]
def preloadCache(self):
"""
Returns whether or not this cache will be preloaded into memory.
:return <bool>
"""
return self._preloadCache
def primaryColumn(self):
"""
Returns the primary column for this table. If this table has no or multiple primary columns defined,
then a PrimaryKeyNotDefined will be raised, otherwise the first column will be returned.
:return <orb.Column>
"""
cols = self.primaryColumns()
if len(cols) == 1:
return cols[0]
else:
raise orb.PrimaryKeyNotDefined(self)
def primaryColumns(self, db=None):
"""
Returns the primary key columns for this view's
schema.
:return <tuple> (<orb.Column>,)
"""
return [col for col in self._columns if col.primary()]
def property(self, key, default=None):
"""
Returns the custom data that was stored on this view at the inputted \
key. If the key is not found, then the default value will be returned.
:param key | <str>
default | <variant>
:return <variant>
"""
return self._properties.get(nstr(key), default)
def removeColumn(self, column):
"""
Removes the inputted column from this view's schema.
:param column | <orb.Column>
"""
try:
self._columns.remove(column)
column._schema = None
except ValueError:
pass
def removeIndex(self, index):
"""
Removes the inputted index from this view's schema.
:param index | <orb.Index>
"""
try:
self._indexes.remove(index)
except ValueError:
pass
def removePipe(self, pipe):
"""
Removes the inputted pipe from this view's schema.
:param pipe | <orb.Pipe>
"""
try:
self._pipes.remove(pipe)
except ValueError:
pass
def removeValidator(self, validator):
"""
Removes a record validators that are associated with this schema. You
can define different validation addons for a view's schema that will process when
calling the View.validateRecord method.
:param validator | <orb.AbstractRecordValidator>
"""
try:
self._validators.remove(validator)
except ValueError:
pass
def reverseLookup(self, name):
"""
Returns the reverse lookup that matches the inputted name.
:return <orb.Column> || None
"""
return {column.reversedName(): column for schema in orb.system.schemas()
for column in schema.columns()
if column.reference() == self.name() and column.isReversed()}.get(name)
def reverseLookups(self):
"""
Returns a list of all the reverse-lookup columns that reference this schema.
:return [<orb.Column>, ..]
"""
return [column for schema in orb.system.schemas()
for column in schema.columns()
if column.reference() == self.name() and column.isReversed()]
def searchEngine(self):
"""
Returns the search engine that will be used for this system.
:return <orb.SearchEngine>
"""
if self._searchEngine:
return self._searchEngine
else:
return orb.system.searchEngine()
def searchableColumns(self, recurse=True, flags=0, kind=0):
"""
Returns a list of the searchable columns for this schema.
:return <str>
"""
return self.columns(recurse=recurse, flags=flags | orb.Column.Flags.Searchable, kind=kind)
def setAbstract(self, state):
"""
Sets whether or not this view is abstract.
:param state | <bool>
"""
self._abstract = state
def setAutoPrimary(self, state):
"""
Sets whether or not this schema will use auto-generated primary keys.
:sa autoPrimary
:return <bool>
"""
self._autoPrimary = state
def setArchived(self, state=True):
"""
Sets the archive state for this schema.
:param state | <bool>
"""
self._archived = state
def setArchiveModel(self, model):
self._archiveModel = model
def setCache(self, cache):
"""
Sets the table cache for this instance.
:param cache | <orb.caching.TableCache>
"""
self._cache = cache
def setCacheEnabled(self, state):
"""
Sets whether or not to enable caching on the View instance this schema
belongs to. When caching is enabled, all the records from the view
database are selected the first time a select is called and
then subsequent calls to the database are handled by checking what is
cached in memory. This is useful for small views that don't change
often (such as a Status or Type view) and are referenced frequently.
To have the cache clear automatically after a number of minutes, set the
cacheTimeout method.
:param state | <bool>
"""
self._cacheEnabled = state
if self._cache:
self._cache.setEnabled(state)
def setCacheTimeout(self, seconds):
"""
Sets the number of seconds that the view should clear its cached
results from memory and re-query the database. If the value is 0, then
the cache will have to be manually cleared.
:param seconds | <int> || <float>
"""
self._cacheTimeout = seconds
def setColumns(self, columns):
"""
Sets the columns that this schema uses.
:param columns | [<orb.Column>, ..]
"""
self._columns = set(columns)
# pylint: disable-msg=W0212
for column in columns:
if not column._schema:
column._schema = self
def setContext(self, name, context):
"""
Sets the context for this schema. This will define different override options when a particular
call is made to the view within a given context.
:param name | <str>
context | <dict>
"""
self._contexts[name] = dict(context)
def setContexts(self, contexts):
"""
Sets the full context set for this view to the inputted dictionary of contexts.
:param contexts | {<str> context name: <dict>, ..}
"""
self._contexts = dict(contexts)
def setDefaultOrder(self, order):
"""
Sets the default order for this schema to the inputted order. This
will be used when an individual query for this schema does not specify
an order explicitly.
:param order | [(<str> columnName, <str> asc|desc), ..] || None
"""
self._defaultOrder = order
def setProperty(self, key, value):
"""
Sets the custom data at the given key to the inputted value.
:param key | <str>
value | <variant>
"""
self._properties[nstr(key)] = value
def setDatabase(self, database):
"""
Sets the database name that this schema will be linked to.
:param database | <orb.Database> || <str> || None
"""
if isinstance(database, orb.Database):
self._database = database
elif database is None:
self._database = None
else:
self._database = None
self.setDatabaseName(database)
def setDatabaseName(self, databaseName):
"""
Sets the database name that this schema will be linked to.
:param databaseName | <str>
"""
self._databaseName = nstr(databaseName)
def setDisplayName(self, name):
"""
Sets the display name for this view.
:param name | <str>
"""
self._displayName = name
def setModel(self, model):
"""
Sets the default View class that is associated with this \
schema instance.
:param model | <subclass of View>
"""
self._model = model
def setNamespace(self, namespace):
"""
Sets the namespace that will be used for this schema to the inputted
namespace.
:param namespace | <str>
"""
self._namespace = namespace
def setIndexes(self, indexes):
"""
Sets the list of indexed lookups for this schema to the inputted list.
:param indexes | [<orb.Index>, ..]
"""
self._indexes = indexes[:]
def setInherits(self, name):
"""
Sets the name for the inherited view schema to the inputted name.
:param name | <str>
"""
self._inherits = name
def setName(self, name):
"""
Sets the name of this schema object to the inputted name.
:param name | <str>
"""
self._name = name
def setGroup(self, group):
"""
Sets the group association for this schema to the inputted group.
:param group | <orb.ViewGroup>
"""
self._group = group
self._groupName = group.name() if group else ''
def setGroupName(self, groupName):
"""
Sets the group name that this view schema will be apart of.
:param groupName | <str>
"""
self._groupName = groupName
def setPipes(self, pipes):
"""
Sets the pipe methods that will be used for this schema.
:param pipes | [<orb.Pipes>, ..]
"""
self._pipes = pipes
def setPreloadCache(self, state):
"""
Sets whether or not to preload all records for this view as
a cache.
:param state | <bool>
"""
self._preloadCache = state
def setSearchEngine(self, engine):
"""
Sets the search engine that will be used for this system.
:param engine | <orb.SearchEngine>
"""
self._searchEngine = engine
def setStatic(self, static):
self._static = static
def setStringFormat(self, text):
"""
Sets a string format to be used when rendering a view using the str()
method. This is a python string format with dictionary keys for the
column values that you want to display.
:param format | <str>
"""
self._stringFormat = nstr(text)
def setDbName(self, dbname):
"""
Sets the name that will be used in the actual database. If the \
name supplied is blank, then the default database name will be \
used based on the group and name for this schema.
:param dbname | <str>
"""
self._dbname = dbname
def setTimezone(self, timezone):
"""
Sets the timezone associated directly to this database.
:sa <orb.Orb.setTimezone>
:param timezone | <pytz.tzfile> || None
"""
self._timezone = timezone
def setValidators(self, validators):
"""
Sets the list of the record validators that are associated with this schema. You
can define different validation addons for a view's schema that will process when
calling the View.validateRecord method.
:param validators | [<orb.AbstractRecordValidator>, ..]
"""
self._validators = validators
def setView(self, name, view):
"""
Adds a new view to this schema. Views provide pre-built dynamically joined tables that can
give additional information to a table.
:param name | <str>
view | <orb.View>
"""
self._views[name] = view
def stringFormat(self):
"""
Returns the string format style for this schema.
:return <str>
"""
return self._stringFormat
def dbname(self):
"""
Returns the name that will be used for the view in the database.
:return <str>
"""
if not self._dbname:
grp = self.group()
prefix = grp.modelPrefix() if grp and grp.useModelPrefix() else ''
self._dbname = self.defaultDbName(self.name(), prefix)
return self._dbname
def timezone(self, options=None):
"""
Returns the timezone associated specifically with this database. If
no timezone is directly associated, then it will return the timezone
that is associated with the Orb system in general.
:sa <orb.Orb>
:return <pytz.tzfile> || None
"""
if self._timezone is None:
return self.database().timezone(options)
return self._timezone
def toolTip(self, context='normal'):
tip = '''\
<b>{name} <small>(View from {group} group)</small></b><br>
<em>Usage</em>
<pre>
>>> # api usage
>>> record = {name}()
>>> record.commit()
>>> all_records = {name}.all()
>>> some_records = {name}.select(where=query)
>>> # meta data
>>> schema = {name}.schema()
>>> # ui display info
>>> schema.displayName()
'{display}'
>>> # database view info
>>> schema.databaseName()
'{db_name}'
</pre>'''
return tip.format(name=self.name(),
group=self.groupName(),
db_name=self.databaseName(),
display=self.displayName())
def toXml(self, xparent=None):
"""
Saves this schema information to XML.
:param xparent | <xml.etree.ElementTree.Element>
:return <xml.etree.ElementTree.Element>
"""
if xparent is not None:
xschema = ElementTree.SubElement(xparent, 'schema')
else:
xschema = ElementTree.Element('schema')
# save the properties
xschema.set('name', self.name())
if self.displayName() != projex.text.pretty(self.name()):
xschema.set('displayName', self.displayName())
if self.inherits():
xschema.set('inherits', self.inherits())
if self.dbname() != projex.text.underscore(projex.text.pluralize(self.name())):
xschema.set('dbname', self.dbname())
if not self.autoPrimary():
xschema.set('autoPrimary', nstr(self.autoPrimary()))
if self.isCacheEnabled():
xschema.set('cacheEnabled', nstr(self.isCacheEnabled()))
xschema.set('cacheTimeout', nstr(self._cacheTimeout))
xschema.set('preloadCache', nstr(self.preloadCache()))
if self.stringFormat():
xschema.set('stringFormat', self.stringFormat())
if self.isArchived():
xschema.set('archived', nstr(self.isArchived()))
# save the properties
if self._properties:
xprops = ElementTree.SubElement(xschema, 'properties')
for prop, value in sorted(self._properties.items()):
xprop = ElementTree.SubElement(xprops, 'property')
xprop.set('key', nstr(prop))
xprop.set('value', nstr(value))
# save the columns
for column in sorted(self.columns(recurse=False),
key=lambda x: x.name()):
column.toXml(xschema)
# save the indexes
for index in sorted(self.indexes(), key=lambda x: x.name()):
index.toXml(xschema)
# save the pipes
for pipe in sorted(self.pipes(), key=lambda x: x.name()):
pipe.toXml(xschema)
return xschema
def validators(self):
"""
Returns a list of the record validators that are associated with this schema. You
can define different validation addons for a view's schema that will process when
calling the View.validateRecord method.
:return [<orb.AbstractRecordValidator>, ..]
"""
return self._validators
def view(self, name):
"""
Returns the view for this schema that matches the given name.
:return <orb.View> || None
"""
return self._views.get(name)
@staticmethod
def defaultDbName(name, prefix=''):
    """
    Returns the default database view name for the inputted name
    and prefix.

    :param name   | <str>
    :param prefix | <str>
    """
    # a non-empty prefix selects the prefixed naming template
    templ = ViewSchema.TEMPLATE_PREFIXED if prefix else ViewSchema.TEMPLATE_VIEW
    return projex.text.render(templ, {'name': name, 'prefix': prefix})
@staticmethod
def fromXml(xschema, referenced=False):
"""
Generates a new view schema instance for the inputted database schema \
based on the given xml information.
:param xschema | <xml.etree.Element>
:return <ViewSchema> || None
"""
tschema = orb.ViewSchema(referenced=referenced)
# load the properties
tschema.setName(xschema.get('name', ''))
tschema.setDisplayName(xschema.get('displayName', ''))
tschema.setGroupName(xschema.get('group', ''))
tschema.setInherits(xschema.get('inherits', ''))
tschema.setDbName(xschema.get('dbname', ''))
tschema.setAutoPrimary(xschema.get('autoPrimary') != 'False')
tschema.setStringFormat(xschema.get('stringFormat', ''))
tschema.setCacheEnabled(xschema.get('cacheEnabled') == 'True')
# support the cacheExpire key that was stored as minutes, as of ORB 4.4.0, all cache
# timeouts are in seconds
tschema.setCacheTimeout(int(xschema.get('cacheTimeout', int(xschema.get('cacheExpire', 0)) * 60)))
tschema.setPreloadCache(xschema.get('preloadCache') == 'True')
tschema.setArchived(xschema.get('archived') == 'True')
# load the properties
xprops = xschema.find('properties')
if xprops is not None:
for xprop in xprops:
tschema.setProperty(xprop.get('key'), xprop.get('value'))
# load the columns
for xcolumn in xschema.findall('column'):
column = orb.Column.fromXml(xcolumn, referenced)
if column:
tschema.addColumn(column)
# load the indexes
for xindex in xschema.findall('index'):
index = orb.Index.fromXml(xindex, referenced)
if index:
tschema.addIndex(index)
# load the pipes
for xpipe in xschema.findall('pipe'):
pipe = orb.Pipe.fromXml(xpipe, referenced)
if pipe:
tschema.addPipe(pipe)
return tschema
"""
Defines a searching algorithm for searching across multiple tables.
"""
from .engine import SearchEngine
from .terms import SearchTerm, SearchTermGroup
from .spelling import SpellingEngine
from .thesaurus import SearchThesaurus
"""
Defines a searching algorithm for searching across multiple tables.
"""
from projex.addon import AddonManager
from .terms import SearchTermGroup
from .thesaurus import SearchThesaurus
from .spelling import SpellingEngine
class SearchEngine(AddonManager):
    """
    Coordinates searching across multiple tables, combining a term
    parser, a spelling engine and a search thesaurus.
    """
    def __init__(self, *tables):
        self._tables = list(tables)
        self._parser = None
        self._thesaurus = SearchThesaurus()
        self._spellingEngine = SpellingEngine()

    def addTable(self, table):
        """
        Adds a table to the search.

        :param table | <orb.Table>
        """
        if table not in self._tables:
            self._tables.append(table)

    def autocorrect(self, phrase, locale=None):
        """
        Auto-corrects the inputted search phrase through the spelling
        engine.

        :param phrase | <str>
               locale | <str> || None | defaults to the system locale
                                        (backward-compatible addition)

        :return <unicode>
        """
        spelling = self.spellingEngine()
        # correct each word independently and rejoin the phrase
        return u' '.join(spelling.autocorrect(word, locale) for word in phrase.split())

    def hasTable(self, table):
        """
        Returns whether or not the inputted table is included with this
        search.

        :return <bool>
        """
        return table in self._tables

    def parse(self, text):
        """
        Parses the inputted text into the search engine terms.

        :param text | <unicode>

        :return <orb.SearchTermGroup>
        """
        return SearchTermGroup.fromString(text)

    def parseQuery(self, table, text):
        """
        Takes the given table and generates a Query object for the
        inputted text.

        :param table | <orb.Table>
               text  | <str>

        :return <orb.Query>
        """
        return self.parse(text).toQuery(table)

    def setParser(self, parser):
        """
        Sets the parser that will be utilized for this engine.

        :param parser | <orb.SearchParser>
        """
        self._parser = parser

    def removeTable(self, table):
        """
        Removes the inputted table from this engine.  Removing a table
        that was never added is a no-op.

        :param table | <orb.Table>
        """
        try:
            self._tables.remove(table)
        except ValueError:
            pass

    def spellingEngine(self):
        """
        Returns the spelling suggestion engine for this search engine.

        :return <orb.SpellingEngine>
        """
        return self._spellingEngine

    def suggestions(self, phrase, locale=None, limit=10):
        """
        Returns the best guess suggestions for the inputted phrase.

        :param phrase | <str> || <unicode>
               locale | <str> || None
               limit  | <int>

        :return [<unicode> phrase, ..]
        """
        words = phrase.split()
        # guard against an empty phrase -- the original code raised an
        # IndexError on words[-1] when there were no words at all
        if not words:
            return []

        spelling = self.spellingEngine()
        # autocorrect everything but the last word, which is the one
        # currently being completed
        known = [spelling.autocorrect(word, locale) for word in words[:-1]]
        return [u' '.join(known + [suggestion])
                for suggestion in spelling.suggestions(words[-1], locale, limit)]

    def setThesaurus(self, thesaurus):
        """
        Sets the search thesaurus that is associated with this engine.

        :param thesaurus | <orb.SearchThesaurus>
        """
        self._thesaurus = thesaurus

    def thesaurus(self):
        """
        Returns the search thesaurus that is associated with this engine.

        :return <orb.SearchThesaurus>
        """
        return self._thesaurus
import collections
import os
import re
from projex.lazymodule import lazy_import
orb = lazy_import('orb')
class SpellingEngine(object):
Alphabet = {
'default': u'abcdefghijklmnopqrstuvwxyz',
'en_US': u'abcdefghijklmnopqrstuvwxyz'
}
def __init__(self):
self._rankings = {}
def alphabet(self, locale=None):
"""
Returns the alphabet for this spelling system. This will be a
language based set based on the language for this suggester.
:param locale | <str> || None
"""
if locale is None:
locale = orb.system.locale()
default = SpellingEngine.Alphabet['default']
return SpellingEngine.Alphabet.get(locale, default)
def autocorrect(self, word, locale=None):
"""
Returns the best guess for the inputted word for the given locale.
:param word | <str> || <unicode>
locale | <str> || None
:return <unicode> word
"""
choices = self.knownWords([word], locale)
if not choices:
edits = self.knownEdits(word, locale)
choices = self.knownWords(edits, locale) or edits or [word]
rankings = self.rankings(locale)
return max(choices, key=lambda x: x.startswith(word) * 10 ** 10 + rankings.get(x))
def commonEdits(self, word, locale=None):
"""
Returns the most common edits for the inputted word for a given
locale.
:param word | <str> || <unicode>
locale | <str> || <unicode> || None
:return {<unicode> word, ..}
"""
alphabet = self.alphabet(locale)
s = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in s if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in s if len(b) > 1]
replaces = [a + c + b[1:] for a, b in s for c in alphabet if b]
inserts = [a + c + b for a, b in s for c in alphabet]
return set(deletes + transposes + replaces + inserts)
def createRankings(self, source, locale=None):
"""
Sets the source text information for this locale.
:param words | <str> || <unicode>
locale | <str> || None
"""
# collect all the words from the source
words = re.findall('[a-z]+', source.lower())
# define the counts per word for common tests
rankings = collections.defaultdict(lambda: 1)
for word in words:
rankings[word] += 1
return rankings
def knownEdits(self, word, locale=None):
"""
Returns the known words for the most common edits for the inputted word.
:param word | <str> || <unicode>
locale | <str> || None
:return {<unicode> word, ..}
"""
rankings = self.rankings(locale)
return set(e2 for e1 in self.commonEdits(word, locale)
for e2 in self.commonEdits(e1) if e2 in rankings)
def knownWords(self, words, locale=None):
"""
Returns a set of the known words based on the inputted list of
words compared against the locale's data set.
:param words | [<str> || <unicode>, ..]
locale | <str> || None
:return {<unicode> word, ..}
"""
rankings = self.rankings(locale)
return set(w for w in words if w in rankings)
def ranking(self, word, locale=None):
"""
Returns the ranking for the inputted word within the spelling engine.
:param word | <str>
:return <int>
"""
rankings = self.rankings(locale)
return rankings.get(word, 0)
def rankings(self, locale=None):
"""
Loads the source text for this spelling suggester based on the
inputted locale source.
:param locale | <str> || None
:return {<unicode> word: <int> ranking, ..}
"""
if locale is None:
locale = orb.system.locale()
try:
return self._rankings[locale]
except KeyError:
filepath = os.path.join(os.path.dirname(__file__),
'words',
'{0}.txt'.format(locale))
try:
source = file(filepath).read()
except OSError:
return {}
# set the words for this locale
rankings = self.createRankings(source, locale)
self._rankings[locale] = rankings
return rankings
def suggestions(self, word, locale=None, limit=10):
"""
Returns a list of the best guesses for the inputted word for
the given locale, along with their ranking.
:param word | <str> || <unicode>
locale | <str> || None
:return [<unicode> word, ..]
"""
edits = list(self.knownEdits(word, locale))
choices = self.knownWords([word] + edits, locale) or edits or [word]
choices = list(choices)
rankings = self.rankings(locale)
choices.sort(key=lambda x: x.startswith(word) * 10 ** 10 + rankings.get(x))
choices.reverse()
return choices[:limit]
"""
Defines a searching algorithm for searching across multiple tables.
"""
from projex.lazymodule import lazy_import
orb = lazy_import('orb')
pyparsing = lazy_import('pyparsing')
class SearchJoiner(object):
    """
    Logical join token (``AND`` / ``OR``) appearing between terms in a
    parsed search expression.
    """
    def __init__(self, text):
        self._text = text

    def toString(self):
        """
        Returns the raw join keyword held by this joiner.

        :return     <str>
        """
        return self._text

    def __str__(self):
        return self.toString()
# ---------------------------------------------------------------------
class SearchTerm(object):
    """
    Represents a single term within a parsed search expression, optionally
    bound to a specific column via the ``column:term`` syntax.
    """
    def __init__(self, engine, text, column=''):
        self._engine = engine    # owning SearchEngine (thesaurus lookups)
        self._text = text        # raw text, may include '-' and quoting
        self._column = column    # bound column name, '' when unbound

    def __str__(self):
        return self.toString()

    def append(self, word):
        """
        Appends an additional word onto this term's text.

        :param      word | <str>
        """
        if self._text:
            self._text += u' ' + word
        else:
            self._text = word

    def column(self):
        """
        Returns the column name this term is bound to, if any.

        :return     <str>
        """
        return self._column

    def isExact(self):
        """
        Returns whether or not this is an exact term to match.

        :return     <bool>
        """
        check = self._text.lstrip('-')
        return (check.startswith('"') and check.endswith('"')) or \
               (check.startswith("'") and check.endswith("'"))

    def isNegated(self):
        """
        Returns whether or not this search term is negated.

        :return     <bool>
        """
        return self._text.startswith('-')

    def setColumn(self, column):
        """
        Sets the column this term should be matched against.

        :param      column | <str>
        """
        self._column = column

    def text(self):
        """
        Returns the text for this term, stripped of negation and quoting.

        :return     <unicode>
        """
        return self._text.lstrip('-').strip('"').strip("'")

    def toQuery(self, table, column=''):
        """
        Generates an orb Query matching this term against the given table.

        :param      table  | <orb.Table>
                    column | <str>

        :return     <orb.Query>
        """
        column = column or self.column()
        col = table.schema().column(column)

        # if the column is not specified then return an or join
        # for all searchable columns
        if not col:
            search_columns = table.schema().searchableColumns()
            out = orb.Query()
            for search_column in search_columns:
                out |= self.toQuery(table, search_column)
            return out

        # otherwise, search the provided column
        else:
            # use a non-column based ':' search term
            if self.column() and col.name() != self.column():
                text = '{0}:'.format(self.column()) + self._text
            else:
                text = self._text

            negated = text.startswith('-')
            text = text.lstrip('-')

            # match quotes in pairs, mirroring isExact -- the previous
            # mismatched "..' / '.." check meant exact terms were never
            # detected here
            exact = (text.startswith('"') and text.endswith('"')) or \
                    (text.startswith("'") and text.endswith("'"))

            text = text.strip('"').strip("'")
            text = text.replace(u'\\', u'\\\\')

            if self.column():
                # column-bound terms are anchored for a full-value match
                expr = u'^{0}$'.format(text).replace('*', r'\w+')
            elif exact:
                expr = text.replace('*', r'\w+')
            else:
                # expand each word through the thesaurus and join with '.*'
                parts = []
                for word in text.split():
                    words = self._engine.thesaurus().synonyms(word)
                    if len(words) > 1:
                        item = u'({0})'.format(u'|'.join(words))
                    else:
                        if '*' in word:
                            word = u'(^|\\W){0}(\\W|$)'.format(word.replace('*', '.*'))
                        item = word
                    parts.append(item)
                expr = u'.*'.join(parts)

            if negated:
                return orb.Query(table, column).asString().doesNotMatch(expr, caseSensitive=False)
            else:
                return orb.Query(table, column).asString().matches(expr, caseSensitive=False)

    def toString(self):
        """
        Returns the string representation for this term, including its
        column binding when present.

        :return     <unicode>
        """
        if self.column():
            return u'{0}:{1}'.format(self.column(), self._text)
        else:
            return self._text
#----------------------------------------------------------------------
class SearchTermGroup(object):
    """
    Represents a parenthesized group of search terms, joiners and nested
    sub-groups parsed from a search string.
    """
    def __init__(self, engine, words, column='', root=False):
        # engine | <SearchEngine> -- passed down to each child term
        # words  | nested list of tokens, as produced by pyparsing.nestedExpr
        # column | <str> column this whole group is bound to
        # root   | <bool> whether this is the outermost group
        self._engine = engine
        self._terms = []
        self._column = column
        self._root = root

        # a trailing 'column:' token binds the NEXT token (word or group)
        # to that column
        last_column = ''
        for word in words:
            curr_column = last_column

            # a group of terms
            if type(word) == list:
                self._terms.append(SearchTermGroup(engine,
                                                   word,
                                                   column=curr_column))

            # lookup a specific column
            elif not (word.startswith('"') or word.startswith("'")) and word.count(':') == 1:
                if word.endswith(':'):
                    # bare 'column:' -- remember it for the next token
                    last_column = word.strip(':')
                else:
                    # inline 'column:text' token
                    curr_column, text = word.split(':')
                    self._terms.append(SearchTerm(engine,
                                                  text,
                                                  column=curr_column))

            # load a joiner
            elif word in ('AND', 'OR'):
                self._terms.append(SearchJoiner(word))

            # load a term
            else:
                # update terms for the same column (consecutive plain words
                # merge into one multi-word term)
                if self._terms and \
                   type(self._terms[-1]) == SearchTerm and \
                   self._terms[-1].column() == curr_column:
                    self._terms[-1].append(word)

                # otherwise, create a new term
                else:
                    self._terms.append(SearchTerm(engine,
                                                  word,
                                                  column=curr_column))

            # reset the last key once its column has been consumed
            if last_column == curr_column and curr_column:
                last_column = ''

    def __str__(self):
        return self.toString()

    def column(self):
        """
        Returns the column this group is bound to, '' when unbound.

        :return     <str>
        """
        return self._column

    def setColumn(self, column):
        """
        Sets the column this group should be matched against.

        :param      column | <str>
        """
        self._column = column

    def terms(self):
        """
        Returns the search terms for this group.

        :return     [<SearchTerm> || <SearchTermGroup>, ..]
        """
        return self._terms

    def toQuery(self, table, column=''):
        """
        Creates a query for the inputted table based on this search term
        information.

        :param      table | <orb.Table>

        :return     <orb.Query> || <orb.QueryCompound>
        """
        column = self.column() or column
        search_columns = table.schema().searchableColumns()

        out = orb.Query()
        # joiners switch the operator used to fold subsequent terms
        op = orb.QueryCompound.Op.And
        for term in self.terms():
            # update the joining operator
            if type(term) == SearchJoiner:
                if str(term) == 'AND':
                    op = orb.QueryCompound.Op.And
                else:
                    op = orb.QueryCompound.Op.Or
                continue

            # generate the search term
            subq = None
            term_column = term.column() or column
            if term_column:
                subq = term.toQuery(table, term_column)

            # generate the search columns
            elif search_columns:
                subq = orb.Query()
                for search_column in search_columns:
                    subq |= term.toQuery(table, search_column)

            # NOTE(review): when neither branch runs, subq stays None and is
            # folded below -- presumably orb.Query tolerates None operands;
            # confirm against orb.Query.__and__/__or__
            if op == orb.QueryCompound.Op.And:
                out &= subq
            else:
                out |= subq
        return out

    def toString(self):
        """
        Returns the string representation for this group, parenthesized
        unless it is the root group.

        :return     <unicode>
        """
        words = []
        for term in self._terms:
            words.append(str(term))

        if self.column():
            return u'{0}:({1})'.format(self.column(), u' '.join(words))
        elif not self._root:
            return u'({0})'.format(u' '.join(words))
        else:
            return u' '.join(words)

    @staticmethod
    def fromString(text, engine=None):
        """
        Parses the inputted text using the common searching syntax.  By default,
        the words will be separated and processed to their root, joined
        together as an AND join.  Override behaviors exist as well:

        :syntax     terms                   | general parsing
                    "<terms>"               | Exact match for terms
                    (<terms>) AND (<terms>) | AND join for two terms
                    (<terms>) OR (<terms>)  | OR join for two terms
                    -<terms>                | NOT search for terms
                    <terms>*                | startswith
                    *<terms>                | endswith
                    <column>:<terms>        | column IS specific term
                    <column>:-<terms>       | column IS NOT specific term

        :param      text | <unicode>

        :return     <SearchParseResult>
        """
        if engine is None:
            engine = orb.system.searchEngine()

        # wrap the whole expression in parens so nestedExpr yields one
        # top-level group
        expr = pyparsing.nestedExpr('(', ')')
        words = expr.parseString(u'({0})'.format(text)).asList()
        return SearchTermGroup(engine, words[0], root=True)
"""
Defines a searching algorithm for searching across multiple tables.
"""
import re
from xml.etree import ElementTree
from projex.addon import AddonManager
from projex.enum import enum
from projex.text import nativestring as nstr
class SearchThesaurus(AddonManager):
    """
    Defines a global thesaurus system for searching.  This will allow
    additional keyword lookups based on synonyms.  Thesaurus can be
    defined on a per API and per Table system.
    """
    Flags = enum('FindSingular', 'FindPlural', 'FindInherited')

    def __init__(self, wordsets=None, parent=None):
        self._wordsets = []    # list of sets of synonymous words
        self._phrases = set()  # regex patterns treated as single terms
        self._parent = parent  # optional thesaurus to inherit from

        if wordsets is not None:
            self.update(wordsets)

    def add(self, word, synonym):
        """
        Adds a new synonym for a given word.

        :param      word    | <str>
                    synonym | <str>
        """
        word = nstr(word)
        synonym = nstr(synonym)

        # extend an existing wordset when one already contains the word
        for wordset in self._wordsets:
            if word in wordset:
                wordset.add(synonym)
                # previously fell through and also appended a duplicate
                # {word, synonym} wordset on every call
                return

        # otherwise define a new wordset
        self._wordsets.append({word, synonym})

    def addset(self, wordset):
        """
        Defines a wordset that will be used as a grouping of synonyms
        within the thesaurus.  A wordset is a comma separated list of
        synonyms.

        :param      wordset | <str> || <list> || <set>
        """
        if type(wordset) in (list, tuple, set):
            wordset = set([nstr(word) for word in wordset])
        else:
            wordset = set(nstr(wordset).split(','))
        self._wordsets.append(wordset)

    def addPhrase(self, pattern):
        """
        Phrases define groups of words that will create a singular
        search term within a search pattern.  For instance, "is not" will be
        treated as single term, so instead of looking for "is" and "not", the
        phrase "is not" will be matched.  The inputted pattern can be a regular
        expression, or hard set of terms.

        :param      pattern | <str>
        """
        self._phrases.add(pattern)

    def clear(self):
        """
        Clears out the data for this thesaurus.
        """
        self._phrases = set()
        self._wordsets = []

    def expand(self, wordset, flags=Flags.all()):
        """
        Expands a given wordset based on the synonyms per word in the set.

        :param      wordset | <list> || <tuple> || <set>

        :return     {<str> word, ..}
        """
        output = set()
        for word in wordset:
            output = output.union(self.synonyms(word, flags))
        return output

    def parent(self):
        """
        Returns the parent thesaurus for this instance if any is defined.

        :return     <orb.SearchThesaurus> || None
        """
        return self._parent

    def phrases(self):
        """
        Returns the phrasing patterns for this thesaurus, including any
        inherited from a parent.

        :return     {<str>, ..}
        """
        out = set(self._phrases)
        if self.parent():
            out = out.union(self.parent().phrases())
        return out

    def load(self, xml):
        """
        Loads the thesaurus information from the inputted XML file or string.

        :param      xml | <str> filename or raw XML content

        :return     <bool> success
        """
        try:
            xroot = ElementTree.parse(xml).getroot()
        # Exception instead of the py2-only StandardError
        except Exception:
            # not a filename -- try parsing as raw XML content
            try:
                xroot = ElementTree.fromstring(xml)
            except Exception:
                return False

        # load wordsets
        xwordsets = xroot.find('wordsets')
        if xwordsets is not None:
            for xwordset in xwordsets:
                self.addset(xwordset.text)

        # load patterns
        xphrases = xroot.find('phrases')
        if xphrases is not None:
            for xphrase in xphrases:
                self.addPhrase(xphrase.text)

        return True

    def remove(self, word, synonym):
        """
        Removes a given synonym from the inputted word in a wordset.

        :param      word    | <str>
                    synonym | <str>
        """
        word = nstr(word)
        synonym = nstr(synonym)

        for wordset in self._wordsets:
            if word in wordset:
                try:
                    wordset.remove(synonym)
                except KeyError:
                    pass

    def removePhrase(self, pattern):
        """
        Removes the given phrasing pattern for this search term.

        :param      pattern | <str>
        """
        try:
            self._phrases.remove(pattern)
        except KeyError:
            pass

    def setParent(self, parent):
        """
        Sets the parent thesaurus for this instance if any is defined.

        :param      parent | <orb.SearchThesaurus> || None
        """
        self._parent = parent

    def splitterms(self, text):
        """
        Splits the inputted search text into search terms.  This will use the
        phrasing patterns within this thesaurus to determine groups of words.

        :param      text | <str>

        :return     [<str>, ..]
        """
        text = nstr(text)
        repl = []

        # pre-process all phrases into their own groups so whitespace inside
        # a phrase does not split it
        for phrase in self.phrases():
            grp = re.search(phrase, text)
            while grp and grp.group():
                result = grp.group()
                text = text.replace(result, '`REGEXGRP{0}`'.format(len(repl)))
                repl.append(result)
                grp = re.search(phrase, text)

        # split the terms into their own words, grouped together with phrases
        output = []
        for term in text.split():
            term = term.strip()
            grp = re.match(r'`REGEXGRP(\d+)`(.*)', term)
            if grp:
                index, remain = grp.groups()
                term = repl[int(index)] + remain
            output.append(term)
        return output

    def synonyms(self, word, flags=Flags.all()):
        """
        Looks up the synonyms for the given word within this thesaurus
        system.

        :param      word | <str>

        :return     set(<str>, ..)
        """
        word = nstr(word)
        output = {word}

        # find matching words
        for wordset in self._wordsets:
            if output.intersection(wordset):
                output = output.union(wordset)

        # lookup inherited synonyms
        if self.parent() and flags & SearchThesaurus.Flags.FindInherited:
            output = output.union(self.parent().synonyms(word, flags=flags))

        return output

    def update(self, wordsets):
        """
        Updates the records for this thesaurus' wordsets with the inputted
        list of sets.

        :param      wordsets | [<str>, ..]
        """
        for wordset in wordsets:
            self.addset(wordset)

    def updatePhrases(self, phrases):
        """
        Updates the phrase sets for this thesaurus with the inputted list
        of phrases.

        :param      phrases | [<str>, ..]
        """
        for phrase in phrases:
            self.addPhrase(phrase)
import pytest
@pytest.fixture()
def orb():
    """Provide the orb module with a test security key installed."""
    import orb
    from projex import security

    orb.system.security().setKey(security.generateKey('T3st!ng'))
    return orb
@pytest.fixture()
def PrivateClass(orb):
    # Table with one Private-flagged column; __auth__ is overridden to grant
    # full access so tests can verify the Private flag alone hides the column.
    class Private(orb.Table):
        id = orb.IdColumn()
        public = orb.StringColumn()
        # flagged Private -- expected to be excluded when iterating the record
        private = orb.StringColumn(flags={'Private'})

        @classmethod
        def __auth__(cls, **context):
            # always allow access -- isolates the Private-flag behavior
            return True

    return Private
def test_overwritten_auth_respects_private_column_flag(PrivateClass):
    """Iterating a record omits Private-flagged columns even when __auth__ grants access."""
    record = PrivateClass()
    visible = {column for column, _ in record}
    assert len(visible) == 2
    assert visible == {'id', 'public'}
+2
-1
Metadata-Version: 2.0
Name: orb-api
Version: 2016.1.45
Version: 2016.0.252
Summary: Database ORM and API builder.

@@ -9,2 +9,3 @@ Home-page: https://github.com/orb-framework/orb

License: MIT
Description-Content-Type: UNKNOWN
Platform: UNKNOWN

@@ -11,0 +12,0 @@ Requires-Dist: projex (>=2015.0.12)

@@ -1,1 +0,1 @@

{"license": "MIT", "name": "orb-api", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "test_requires": [{"requires": ["tox", "coverage", "pytest"]}], "summary": "Database ORM and API builder.", "run_requires": [{"requires": ["projex (>=2015.0.12)", "inflection", "pycrypto", "pyparsing", "pyyaml", "pytz"]}], "version": "2016.1.45", "extensions": {"python.details": {"project_urls": {"Home": "https://github.com/orb-framework/orb"}, "document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "eric.hulser@gmail.com", "name": "Eric Hulser"}]}}, "extras": []}
{"description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "eric.hulser@gmail.com", "name": "Eric Hulser", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/orb-framework/orb"}}}, "extras": [], "generator": "bdist_wheel (0.30.0)", "license": "MIT", "metadata_version": "2.0", "name": "orb-api", "run_requires": [{"requires": ["inflection", "projex (>=2015.0.12)", "pycrypto", "pyparsing", "pytz", "pyyaml"]}], "summary": "Database ORM and API builder.", "test_requires": [{"requires": ["coverage", "pytest", "tox"]}], "version": "2016.0.252"}
__major__ = 2016
__minor__ = 1
__revision__ = "45"
__hash__ = "g7d127cc"
__minor__ = 0
__revision__ = "252"
__hash__ = "g7c99212"

@@ -107,2 +107,3 @@ """

if (not column or
column.testFlag(column.Flags.Private) or
column.testFlag(column.Flags.RequiresExpand) or

@@ -109,0 +110,0 @@ not auth(columns=(column,), context=context)):

+123
-9
orb/__init__.py,sha256=nrlB5nRcR2ysspoSaE8SrLmBj-DGJs4bNROwUEc5fQk,1517
orb/_version.py,sha256=tZ1eDYWhGi0WuhAZ7Odj6WL9U_CVZ_q117zd-fs2tPg,72
orb/_version.py,sha256=EbAbKVLJ9Kqj01fUgYiRPm8c9SHDzVWQGIT8NoJ3SrY,73
orb/common.py,sha256=bfpIwqjol8s22WeUrXu8vUQg3cPs2qKprX1DjvbGlbM,2240
orb/contexts.py,sha256=YBJaNtKhxHeraufl7JyKpHXxKced6EZtRE8Cl72mkBI,509
orb/decorators.py,sha256=gLko_VYjt1-LdBXa0nChPO2EcPD3EUFJsnTKhIDAFKo,1565
orb/errors.py,sha256=YwgpmA_EEj3mfhcgfCvI7MQp8I8x-eL6L-flh0N5QNE,9425
orb/manager.py,sha256=w-fhewW3mtrPjBxqZ9SMVdW0OI9xCorigsnMebLaaV0,36446
orb/pyi_hook.py,sha256=VcmkilePlzx1x8kJlfzBEuh6yhP7ILVhAGAch8dl4Ok,677
orb/rest.py,sha256=JDiZXHrdbMW84rak6Lq7sjwPB2dKOhCuTunBe912sUY,478
orb/settings.py,sha256=cd-ZslpMuZDUy-ZtHMb3YVmKVd8PXjjluWRouLILo_U,747
orb/caching/__init__.py,sha256=UwEHLy6oTA3FTQdm9paQniLnhNmgbdnN88CTkXpZ8A4,170
orb/caching/datacache.py,sha256=mCHppSvJHA8yxRV5SfaDm-ZN3m3y2zfWyswhWfRQl3U,3822
orb/caching/recordcache.py,sha256=IJDfbWaxMJqS_TTU0LqDU1JbqjnEeNr-G4SlNrOghJI,10865
orb/caching/tablecache.py,sha256=YY0Tn5GgD0D2O-X4MmVJ6MxYhzBcPrxNPeA1xbydB8k,3684
orb/caching/backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
orb/caching/backends/__plugins__.py,sha256=N5guQhA_ktzIaavjbcEw_IUNirEo_sbWXJxgjeDkmkQ,102
orb/caching/backends/basiccache.py,sha256=NWBth1v2m5so4uGGUNQPIvMY4Y2hoPspoSEhfK81mAg,2446
orb/caching/backends/rediscache.py,sha256=BGDBBbWeis2-CKHm5rjTuoZYDOJHKxP2aTo_HheoRtA,2091
orb/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -13,7 +26,9 @@ orb/core/collection.py,sha256=surhGg3x3RBwSI7y7b3idjG8ERUhjqMv36EJdzX-tsI,29528

orb/core/database.py,sha256=eccBEJBJM70RuCQtvJYVHDGvPcmNTAkfG7VLsRIpwa4,11885
orb/core/environment.py,sha256=PNpegGLigeO_2aEKEHEYp2waHVzEd7UUFg0eP-0o0FI,8418
orb/core/events.py,sha256=7logLhpGezR2f-hKH4_naWur837pb6WkIHmmuS17zng,2823
orb/core/index.py,sha256=xzDIZRbTtfnWqb2p9K0AdwcNyr6U9ku-EcYOO_rgMHw,5647
orb/core/metamodel.py,sha256=xNGR_m1-4LZ2JBtIAmso87SlMEgSkw591AluOkIs8UI,5643
orb/core/model.py,sha256=oNUcPB9RiCGg6zSPV7s0ZSe9AKejO42oB9Rhdyud4os,46238
orb/core/model.py,sha256=8qpJN38elA_oqIqBcVWQ2cynhMEPQAcKxv_igaqPRtU,46299
orb/core/modelmixin.py,sha256=g2jYUSVqOTBXpCBynzGRZHdfYluNXSaHSPeVAIDiqi4,36
orb/core/options.py,sha256=zToJ-EAZDO_-6v-n6CXT5kjWhwgSm5psNehQfL3IMrw,15793
orb/core/pipe.py,sha256=Z9dGoHpSv9aXeKhFYhO097Or8GxOePvhawAyexAgCgs,3071

@@ -26,3 +41,65 @@ orb/core/query.py,sha256=nu-By18JBed5LpwWSqSepBqabL3-uHUa0P4wtMeqepo,46036

orb/core/system.py,sha256=DOHNH9X_OYSqPZtWcOLjN7rGi-Al_NWGEVT51jv0LoY,4995
orb/core/transaction.py,sha256=e4Tm-PGq6sHO7u_Qm6x1dZpDLxZrhe9PH3qsqRs1XsE,4379
orb/core/transactions.py,sha256=0RelKNja7RitkG2reHRYO_02fCbOzDaesoFRmCMiens,511
orb/core/backends/__init__.py,sha256=bcAZMaZNYIGyVsJhlssA7PzA3AahhjMquBWzjms5JPI,106
orb/core/backends/__plugins__.py,sha256=Bavo76QJkFr5x_bdGZvqN646uUTuwQYFya3_rvJWV8I,111
orb/core/backends/nonsql/__init__.py,sha256=BUZFQeGxh_7pCO-JX_xF1OwbG_KlpZvDrYeJrNTF8YY,58
orb/core/backends/sql/__init__.py,sha256=KbXZHNOPh0T9DNOwg7ng2ybLbXgybXkVq6HFAS8sArY,143
orb/core/backends/sql/abstractconnection.py,sha256=tK3SiUh3HRleEVQYrWa6hbiFKhcK2Yo254d1HyVgymA,30149
orb/core/backends/sql/abstractsql.py,sha256=Rf7KAS_DRHVm0zP4NFZ7ZwUfvOTHg05-K6Jg6alwH1I,4821
orb/core/backends/sql/mysql/__init__.py,sha256=IYzUB2K8vGG8vmyC-b3x7nm0mwcCSJeMXkds1AVa998,168
orb/core/backends/sql/mysql/connection.py,sha256=bGcs9hC3fKDA-lqLrhLktt3ezB_EfSLwVy1cenOGEfM,7010
orb/core/backends/sql/mysql/sql.py,sha256=DLGjbvyI0ez9O4sgb8_Q2VfWjaNyMt1eDwVdxMnvNB0,1994
orb/core/backends/sql/mysql/store.py,sha256=22zRiRI5udLzLVbZ0BbbAsWTZPfvGoHpzARMImBf7x0,179
orb/core/backends/sql/mysql/statements/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
orb/core/backends/sql/mysql/statements/__plugins__.py,sha256=x_J-P13iPE05JL04ghf4B39_km3RUlyMhIx05X1umNI,1231
orb/core/backends/sql/mysql/statements/add_column.sql.mako,sha256=KlTK7J8YfSASbsL5o0CWuZDC8UpBzWaJFxa_qUacLow,355
orb/core/backends/sql/mysql/statements/alter_table.sql.mako,sha256=r74tZ4ZXG1bgI9lH87rGopzGjJoL-WQU15Rx4tAasiM,1246
orb/core/backends/sql/mysql/statements/create_index.sql.mako,sha256=J_OTG7pAhHR3KN4XBBS4fv8XS5UPI0pkfBkOGm5uL9s,1805
orb/core/backends/sql/mysql/statements/create_table.sql.mako,sha256=9ZwWwg_BgExdzRYq1gsn2lu1zdu_ngDbvfv4_m5t0NI,1117
orb/core/backends/sql/mysql/statements/create_view.sql.mako,sha256=wC6uFZmwBr37S8yz9PA-2O-pVZyWKm01_xk0tGjbFZQ,7270
orb/core/backends/sql/mysql/statements/delete.sql.mako,sha256=_kDZ2A3VmR7WQXp7xt__Ppg1BI2mE-W0ymjFDQ7UgQo,70
orb/core/backends/sql/mysql/statements/enable_internals.sql.mako,sha256=WYFNWMFzYXXgSqI8TrpgymJLEMleSvi6JeHf5NnamdI,209
orb/core/backends/sql/mysql/statements/insert.sql.mako,sha256=AiK1usC73QJCdkGcy8HPyG_G1i915KuoaynnUcDXLaY,737
orb/core/backends/sql/mysql/statements/quote.sql.mako,sha256=wpv-cIP1R36cjarpCeTxcYvnncmEZyOnf-MxAt5BrGk,50
orb/core/backends/sql/mysql/statements/schema_info.sql.mako,sha256=j8GK4uXOQJp1skAQp1TvJ9vFFr9Z-kA_i8S-ociYago,606
orb/core/backends/sql/mysql/statements/select.sql.mako,sha256=UnXqGtgR78WdY67Tnyq4V2DBR_tr4pHDy5xak3-PS70,7617
orb/core/backends/sql/mysql/statements/select_aggregate.sql.mako,sha256=_vH9B9iCqXZ2Tiw2Z3xxeD3NWjZgBhQ0PwCn_ZZLXSw,1728
orb/core/backends/sql/mysql/statements/select_count.sql.mako,sha256=4w4fOF1D0jfD2ky_-yrJK5Leu1pUuNbAdQOuYOWw4O8,405
orb/core/backends/sql/mysql/statements/select_expand.sql.mako,sha256=WLqH0Ji1SGaZ63QcQyNfk14mtSIpQbFj3ODxArqiEVg,7478
orb/core/backends/sql/mysql/statements/select_joiner.sql.mako,sha256=Sub3lPMX-9S2KFO2cSkP_kQDN_qkewMEXHPJjcsZwBg,1400
orb/core/backends/sql/mysql/statements/select_shortcut.sql.mako,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
orb/core/backends/sql/mysql/statements/setup_db.sql.mako,sha256=G-jx5--9iOJK1jvLWyC7NbE0oDenbygiinMO_VxZnRE,491
orb/core/backends/sql/mysql/statements/update.sql.mako,sha256=Ci8FEA79G8lO7P7jfhCh9XBln0WTtD18tno7KZZrHs4,1909
orb/core/backends/sql/mysql/statements/where.sql.mako,sha256=tvu2EAAsRvL4pLGs04mODdy9jDvyIEQDx-j-XFo3dIY,2344
orb/core/backends/sql/postgresql/__init__.py,sha256=Yca9cP8w5vAzj_U2kPE9Jml_AFGMOQ9yVRxtT-TThvA,165
orb/core/backends/sql/postgresql/connection.py,sha256=gSKrnKnNxoutWOVdawQX7iIe_n19on9tfyRvOKZWgms,7727
orb/core/backends/sql/postgresql/sql.py,sha256=iFkgXTx-JOyFiDRvtHL2NHye4Z7Rr_XbTNq23VIg_lk,1984
orb/core/backends/sql/postgresql/store.py,sha256=OFf4jwyKCPx-6ttolDING5jALNnsSERjHmx7qVFTdYk,179
orb/core/backends/sql/postgresql/statements/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
orb/core/backends/sql/postgresql/statements/__plugins__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
orb/core/backends/sql/postgresql/statements/add_column.sql.mako,sha256=a3x6km22PCori2G30FJjCoFlIlgtmqEBeO4ogfb5Wcs,355
orb/core/backends/sql/postgresql/statements/alter_table.sql.mako,sha256=1vBaxHyTVtVNJA9dML9eAXwsQaW55ZCwtNoUwHqWH84,1246
orb/core/backends/sql/postgresql/statements/create_index.sql.mako,sha256=0JVV9PclffTWgn5ZIu6LILL8CXhGpCHbY8Czcr8gFuo,1805
orb/core/backends/sql/postgresql/statements/create_table.sql.mako,sha256=U_BAmN12GlEu16xCl6heSckt6g5ya3wbnvAv1u6tiEQ,1119
orb/core/backends/sql/postgresql/statements/create_view.sql.mako,sha256=WWQcDIZtLCqckVNYEZCxR4E22XB-Y7BiqUbS0q-lafw,7288
orb/core/backends/sql/postgresql/statements/delete.sql.mako,sha256=Zap7vTFMlX74ctgj7nWo5G2eSZJJg5Fh7ITH8S3aUyQ,70
orb/core/backends/sql/postgresql/statements/enable_internals.sql.mako,sha256=UAfugs18h5G8bO0qkgSzYaumhJ7XZAd-NFPJPO7Hvmc,209
orb/core/backends/sql/postgresql/statements/insert.sql.mako,sha256=6ljSw1KCQ35EX6nDTx1XksgD27LX8Yq9U_S_IJhA160,737
orb/core/backends/sql/postgresql/statements/quote.sql.mako,sha256=cqNw2AmIQk95Ue0owMlCS4hM1f2uqn6cf4CqoGNuVCw,50
orb/core/backends/sql/postgresql/statements/schema_info.sql.mako,sha256=70BF-47cncT-HRpKdj4dTUShdmWeaQq-4pvMTZymsYU,606
orb/core/backends/sql/postgresql/statements/select.sql.mako,sha256=I4xdL3EbtZILC0Fr9C56k7ZseRKMDwr_8T3hhGRBG_M,7794
orb/core/backends/sql/postgresql/statements/select_aggregate.sql.mako,sha256=qD602bFoXpK-RGKJ3fRIoCAdPE0sUE4ety7lRxpW7eg,1728
orb/core/backends/sql/postgresql/statements/select_count.sql.mako,sha256=4w4fOF1D0jfD2ky_-yrJK5Leu1pUuNbAdQOuYOWw4O8,405
orb/core/backends/sql/postgresql/statements/select_expand.sql.mako,sha256=vM9otY6-U9aJHaw4P1mpIwsgmEJrFe8yGLU0onETXm8,8957
orb/core/backends/sql/postgresql/statements/select_joiner.sql.mako,sha256=CN0hElPkoYzGs-MCMYJJpEnLX3uCKL6JFgWeEYtnLh4,1400
orb/core/backends/sql/postgresql/statements/select_shortcut.sql.mako,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
orb/core/backends/sql/postgresql/statements/setup_db.sql.mako,sha256=zRXstQOG54SG_GpORI11BX9Q8g0BWsbIDl5l4ol7qOU,491
orb/core/backends/sql/postgresql/statements/update.sql.mako,sha256=bFwRIanpbQz8HZ7obtW7TvlhMpCBQQwXA1pvYkQyxuc,1909
orb/core/backends/sql/postgresql/statements/where.sql.mako,sha256=aDHUe7fiojTDdYPe1OS3M00nEt1XzB-FFZcxy3K6gRQ,2131
orb/core/backends/sql/shared/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
orb/core/backends/sql/shared/sql.py,sha256=Xlxj9c5L4xDzUxA5BHljkwUHPtDxWoMr2kPIrTM5Sy8,24809
orb/core/backends/web/__init__.py,sha256=ywkfvMGe4hzAU61PMCtA_OgS9YaXD35yWS72gjOUZY8,121
orb/core/backends/web/quickbase/__init__.py,sha256=_T8jvuPfHypNmO7GHcv7AWMKxI6fOtrqS6OMjnNYGJ0,106
orb/core/backends/web/quickbase/quickbase.py,sha256=foCSF1kQQYxpwcJtLNt2SaRufyhYgXAJP1wPmA7blZQ,18540
orb/core/column_types/__init__.py,sha256=a_9ozezsL3XvSJQWvZKjsHJ7BYy3Zm1h-ElUHJUPyQ4,172

@@ -92,13 +169,48 @@ orb/core/column_types/boolean.py,sha256=ELtiuS0MQuBSg5qNOlHXFUO-AhSVZrE1RBPl-TPtCDY,953

orb/core/model_types/view.py,sha256=Hl9MOIULKQzdT1bS3kXNnTsr1D5kk8gmhL1kdOrnw44,332
orb/data/__init__.py,sha256=AQA_t1hQNxjAXecaMqa20DLsahCJu6W-FAK0tdos3DA,231
orb/data/converter.py,sha256=gcu-pvYGCsEM11WA8oZmQA0UJDO9VLPljQzlbhykmXo,954
orb/data/join.py,sha256=mauIpzDyk6RsoGynMnU63WqCwi2UViXPmak7kjxk2QI,5535
orb/data/piperecordset.py,sha256=1bYwrWxyz9hzIqL30frtgA55jBjMfzy9NNeYZq06MKo,6578
orb/data/recordset.py,sha256=9uIEcjc78iy1ff0yyVefpZASPgWEC5w3uPeoY1ynEPg,54783
orb/data/store.py,sha256=ghmI1p-J29xeM2BFBR96fp_2w9Hj21sPZoiKfxf393k,6249
orb/data/converters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
orb/data/converters/__plugins__.py,sha256=j6BZcLius9vcs6OMzixsLUbZNr_HhzH20SWs5NuRe2M,57
orb/data/converters/qt.py,sha256=hC8TH6Q0B1kSuPbo1wujC6nHrMF7HN4hKaqWCxC9Gqo,2282
orb/querying/__init__.py,sha256=GMAqxdEnQBMMqEdc9d-sCWptUl_EAUZluX0gNuDnEYQ,210
orb/querying/query.py,sha256=KGZOoEfG_L5tW48h5ktCwDQNnDIe9IQHQtsE3elZ2l4,75756
orb/querying/queryaggregate.py,sha256=kHQS65ZFJPZ7qx4ZrKLcfUFQIloubx7pMOwf_rkHa4g,1528
orb/querying/querycompound.py,sha256=PhmOPo96XqfnppYRdPAu7noBFJB2uLxxWSlucwvJLa0,17556
orb/querying/querypattern.py,sha256=_xRet5F62C-XrAPrYx-LKMeu881P1YReope4mq8NitA,1131
orb/schema/__init__.py,sha256=RcDDO5Eclxap2A2RDa2oON0XpFGRgxJ6q-P0-8JAOU4,496
orb/schema/column.py,sha256=yDzXFlJvCRdntrFp-DMOJJDK2EfZYPSbhHDtbrRfvlw,61198
orb/schema/columnaggregator.py,sha256=Sjk9cMOFPI1Vb1UJctM7Wftp3Qmaa9fZtdlmwwO-Brk,3818
orb/schema/columnjoiner.py,sha256=dFQW40IqGCQ23PIVpUlMp4FvNF3ghZMwndDlewGBZmA,2197
orb/schema/dynamic.py,sha256=olXs20-GjmhE1jMw1ml5NNJeppVwooK-GOxfgXwntcQ,316
orb/schema/index.py,sha256=1eVfNZcffGzU1i2fFSrg04JyZcIngh5JWtDGhJDN_eo,10069
orb/schema/pipe.py,sha256=gMY8YBTcvaoqeApFBCQ69GcxdeeZOwrzDeHrokD-WsE,12833
orb/schema/table.py,sha256=tzSiQLKGLAvylbjjiY7h17vJ8Hma2h1fhZWfzjbuTZY,78186
orb/schema/tablegroup.py,sha256=Gm_aLZo0mDFXV4miizUBIYmE81oItBocm4fxv9yMLX0,12238
orb/schema/tableschema.py,sha256=Z4c9hIUxzStYqjkn0ZHHPmsIqyw3YkeBewY9WiksciY,44137
orb/schema/validator.py,sha256=uIiS97RIbA2SSvlykR-gRrGtin2BuUtgqfox2pt14nc,3029
orb/schema/view.py,sha256=C4evXfuHDG0mHlc7sgOpK6fB6KbEH7rHldvQ7z-e3gM,71073
orb/schema/viewschema.py,sha256=sQXIhPi0Uz8QBOcMhwIGhADg7cmgRKuS99znwBwUzHE,40173
orb/schema/meta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
orb/schema/meta/metatable.py,sha256=WGIcfedzLtv5QLBaLk-ynIH7D27gS2d15ygjc_I9HUw,21450
orb/schema/meta/metaview.py,sha256=ycnvGv758AWpRizrHiLQmvm8kwqHqXDoGaC3G_MlvVk,20629
orb/searching/__init__.py,sha256=PJCIG8JCR3oNOyYes7Qs7GkGpV0IA8jbExQw33MpkwM,232
orb/searching/engine.py,sha256=8MFNUk_O5QJjhkRCOdvby-xIzX0bUjp5ArmV7Ss4BO0,3586
orb/searching/spelling.py,sha256=u_pL_d7o4GGWQxUCUx4yeMfp-0FrYZgZpkeRWqreMzU,5487
orb/searching/terms.py,sha256=gKlvwuvHPSkEKodqwWCX4dgTD4LvQv1XgZvV2Yd2vFU,9181
orb/searching/thesaurus.py,sha256=rYCxMjtINxR_d9ErO0s92JCmHYl9STjWEZbg3f0P8QU,7355
orb/testing/__init__.py,sha256=usIXBwq9YJ3ioZ507xIhj54geabeZyN4UamcPmFKK3A,87
orb/testing/mock_connection.py,sha256=1VKftcUQ1oGH8Z6ufTezZgNomW0h1a8PDhkKLFxWS64,8578
orb_api-2016.1.45.dist-info/DESCRIPTION.rst,sha256=dZf13pl8GJQNzQ4W3SN8eIBft5Qw0TvQcsTSh0c3bXA,1048
orb_api-2016.1.45.dist-info/METADATA,sha256=6vaEDbviRgJerX1yZV8mGAq_vMAPFmZ2hP98IG6WH9Y,1431
orb_api-2016.1.45.dist-info/metadata.json,sha256=_bYgnRurXZjzRU85Ht3b0qpKeEyUZijCJmxsg9P3Pu0,609
orb_api-2016.1.45.dist-info/RECORD,,
orb_api-2016.1.45.dist-info/top_level.txt,sha256=DK5wsuJha_mOlcN64PSr5K_BJil8iyJHBA-juFCxGB4,10
orb_api-2016.1.45.dist-info/WHEEL,sha256=54bVun1KfEBTJ68SHUmbxNPj80VxlQ0sHi4gZdGZXEY,92
orb_api-2016.0.252.dist-info/DESCRIPTION.rst,sha256=dZf13pl8GJQNzQ4W3SN8eIBft5Qw0TvQcsTSh0c3bXA,1048
orb_api-2016.0.252.dist-info/METADATA,sha256=Rv98yC0omkMh82_LhNyH5o7PiBCbhfQw0j8x_QzEaH4,1466
orb_api-2016.0.252.dist-info/RECORD,,
orb_api-2016.0.252.dist-info/WHEEL,sha256=hZvi-8L6mc2COqhOUWREM_h2lvnJbaHycZt8eohtmso,92
orb_api-2016.0.252.dist-info/metadata.json,sha256=m-SEkH21aSvutwucWJc_WXJpClT5H8td9mzAPYhNG4Y,649
orb_api-2016.0.252.dist-info/top_level.txt,sha256=DK5wsuJha_mOlcN64PSr5K_BJil8iyJHBA-juFCxGB4,10
tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
tests/functional/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
tests/functional/conftest.py,sha256=H0qCP-vrjob4R6fAerfvwAjUefAQIJ9GNRt_afPlC9Y,6413
tests/functional/conftest.py,sha256=QFJLyJBIn-OV3J8kB4zVrjc95QlbUZVszKVo5gf6UR4,6414
tests/functional/mysql/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -123,4 +235,6 @@ tests/functional/mysql/test_my_api.py,sha256=-pdCNBuIm0zmngnc3zmXly83PIsTIYH_yikG2Ggu10k,11003

tests/unit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
tests/unit/conftest.py,sha256=9PBjeBlDXIbRMk5DZ3f5hUl13HHzrx5kMoaW3TtN8Ns,482
tests/unit/test_decorators.py,sha256=S_v1NzzSq1zCCHYZJC3SJTPfdf78uzzXbRMqtLrvRpA,4889
tests/unit/test_errors.py,sha256=n92j7uUuToRPbb0ImDLqM_I7yueHykLWhFvj-sAdqp0,9752
tests/unit/test_model.py,sha256=UXK-4k90l7ONgzvg3u3FBcKNSyszOXp79eI5qha1Ptk,213
tests/unit/test_settings.py,sha256=X5wXxC_g0FBays9L9n3cmPatSWTWZdlJggsPdMmnFxk,3270

@@ -226,2 +226,2 @@ import pytest

def Employee(testing_schema):
return testing_schema['Employee']
return testing_schema['Employee']
+1
-1
Wheel-Version: 1.0
Generator: bdist_wheel (0.24.0)
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: true
Tag: py2-none-any