Launch Week Day 5: Introducing Reachability for PHP.Learn More
Socket
Book a DemoSign in
Socket

py-postgresql

Package Overview
Dependencies
Maintainers
1
Versions
17
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

py-postgresql - pypi Package Compare versions

Comparing version
1.2.2
to
1.3.0
+14
postgresql/documentation/sphinx/changes-v1.3.rst
Changes in v1.3
===============
1.3.0
-----
* Commit DB-API 2.0 ClientCannotConnect exception correction.
* Eliminate types-as-documentation annotations.
* Add Connection.transaction alias for asyncpg consistency.
* Eliminate multiple inheritance in `postgresql.api` in favor of ABC registration.
* Add support for PGTEST environment variable (pq-IRI) to improve test performance
and to aid in cases where the target fixture is already available.
This should help for testing the driver against servers that are not actually
postgresql.
##
# .iri
##
"""
Parse and serialize PQ IRIs.
PQ IRIs take the form::
pq://user:pass@host:port/database?setting=value&setting2=value2
IPv6 is supported via the standard representation::
pq://[::1]:5432/database
Driver Parameters:
pq://user@host/?[driver_param]=value&[other_param]=value&server_setting=val
"""
# Project-relative imports: the RI (resource indicator) parsing primitives
# and the identifier splitter used for ','-separated search_path strings.
from .resolved import riparse as ri
from .string import split_ident
from operator import itemgetter
# Sort key extracting the first element of a pair; orders driver parameters.
get0 = itemgetter(0)
del itemgetter
import re
# Matches characters that must be percent-encoded inside a path component;
# extends ri.unescaped with ',' because construct_path uses ',' as a separator.
escape_path_re = re.compile('[%s]' %(re.escape(ri.unescaped + ','),))
def structure(d, fieldproc = ri.unescape):
	"""
	Create a clientparams dictionary from a parsed RI.

	`d` is the mapping produced by `ri.parse`; `fieldproc` is applied to
	each field to unescape it (pass `str` to keep fields verbatim).
	Raises `ValueError` when the scheme is not a recognized PQ scheme.
	"""
	if d.get('scheme', 'pq').lower() not in {'pq', 'postgres', 'postgresql'}:
		raise ValueError("PQ-IRI scheme is not 'pq', 'postgres', or 'postgresql'")
	# Copy the plain scalar fields, unescaping each value; the structured
	# fields (path/fragment/query/host) are handled individually below.
	cpd = {
		k : fieldproc(v) for k, v in d.items()
		if k not in ('path', 'fragment', 'query', 'host', 'scheme')
	}
	path = d.get('path')
	frag = d.get('fragment')
	query = d.get('query')
	host = d.get('host')
	if host is not None:
		if host.startswith('[') and host.endswith(']'):
			# Bracketed host: an IPv6 address or a unix-socket spec.
			host = host[1:-1]
			if host.startswith('unix:'):
				# '[unix:a:b]' encodes the socket path '/a/b'.
				cpd['unix'] = host[len('unix:'):].replace(':','/')
			else:
				cpd['host'] = host
		else:
			phost = fieldproc(host)
			if '/' in phost[:1]:
				# A leading '/' marks a unix-domain socket path.
				cpd['unix'] = phost
			else:
				# NOTE(review): stores the raw `host`, not the unescaped
				# `phost` — confirm this is intended for escaped hostnames.
				cpd['host'] = host
	if path:
		# Only state the database field's existence if the first path is non-empty.
		if path[0]:
			cpd['database'] = path[0]
		path = path[1:]
		if path:
			cpd['path'] = path
	settings = {}
	if query:
		# Accept either a mapping or an iterable of (key, value) pairs.
		if hasattr(query, 'items'):
			qiter = query.items()
		else:
			qiter = query
		for k, v in qiter:
			if k.startswith('[') and k.endswith(']'):
				# '[name]' keys are driver parameters, not server settings.
				k = k[1:-1]
				if k != 'settings' and k not in cpd:
					cpd[fieldproc(k)] = fieldproc(v)
			elif k:
				settings[fieldproc(k)] = fieldproc(v)
			# else: ignore empty query keys
	if frag:
		# The fragment carries the search_path as a ','-separated list.
		settings['search_path'] = [
			fieldproc(x) for x in frag.split(',')
		]
	if settings:
		cpd['settings'] = settings
	return cpd
def construct_path(x, re = escape_path_re):
	"""
	Join a path sequence with ',' after percent-escaping each piece,
	so that embedded ',' characters survive the round-trip.
	"""
	escaped_pieces = [re.sub(ri.re_pct_encode, piece) for piece in x]
	return ','.join(escaped_pieces)
def construct(x, obscure_password = False):
	"""
	Construct an RI tuple (scheme, netloc, path, query, fragment) from a
	clientparams dictionary, suitable for `ri.unsplit`.

	When `obscure_password` is true, the password is replaced with '***'
	so the result is safe to display or log.
	"""
	# The rather exhaustive settings choreography is due to
	# a desire to allow the search_path to be appended in the fragment.
	settings = x.get('settings')
	no_path_settings = None
	search_path = None
	if settings:
		if isinstance(settings, dict):
			siter = settings.items()
			search_path = settings.get('search_path')
		else:
			siter = list(settings)
			search_path = [(k,v) for k,v in siter if k == 'search_path']
			search_path.append((None,None))
			# BUGFIX: select the FIRST match; the trailing (None, None)
			# sentinel only applies when there was no match. Indexing with
			# [-1] always selected the sentinel, dropping the search_path.
			search_path = search_path[0][1]
		no_path_settings = [(k,v) for k,v in siter if k != 'search_path']
		if not no_path_settings:
			no_path_settings = None
	# It could be a string search_path, split if it is.
	if search_path is not None and isinstance(search_path, str):
		search_path = split_ident(search_path, sep = ',')
	port = None
	if 'unix' in x:
		# Unix-socket paths ride in the bracketed host field with ':' as
		# the path separator: '/a/b' -> '[unix:a:b]'.
		host = '[unix:' + x['unix'].replace('/',':') + ']'
		# ignore port.. it's a mis-config.
	elif 'host' in x:
		host = x['host']
		if ':' in host:
			# Bracket IPv6 literals so the port separator is unambiguous.
			host = '[' + host + ']'
		port = x.get('port')
	else:
		host = None
		port = x.get('port')
	path = []
	if 'database' in x:
		path.append(x['database'])
	if 'path' in x:
		path.extend(x['path'] or ())
	password = x.get('password')
	if obscure_password and password is not None:
		password = '***'
	# Driver parameters become '[name]=value' query fields; the excluded
	# keys are serialized elsewhere in the IRI.
	driver_params = list({
		'[' + k + ']' : str(v) for k,v in x.items()
		if k not in (
			'user', 'password', 'port', 'database', 'ssl',
			'path', 'host', 'unix', 'ipv','settings'
		)
	}.items())
	driver_params.sort(key=get0)
	return (
		'pqs' if x.get('ssl', False) is True else 'pq',
		# netloc: user:pass@host[:port]
		ri.unsplit_netloc((
			x.get('user'),
			password,
			host,
			# BUGFIX: use the locally resolved `port` (previously dead)
			# so it is properly omitted for unix-socket targets.
			None if port is None else str(port)
		)),
		# BUGFIX: escape each path component; the arguments were reversed
		# and the subject string was a literal '/'. The correct call shape
		# matches construct_path: pattern.sub(ri.re_pct_encode, component).
		None if not path else '/'.join([
			escape_path_re.sub(ri.re_pct_encode, path_comp)
			for path_comp in path
		]),
		(ri.construct_query(driver_params) if driver_params else None)
		if no_path_settings is None else (
			ri.construct_query(
				driver_params + no_path_settings
			)
		),
		None if search_path is None else construct_path(search_path),
	)
def parse(s, fieldproc = ri.unescape):
	"""
	Parse a Postgres IRI string into a clientparams dictionary.
	"""
	# Parse with fieldproc=str so values stay escaped: the sub-structure
	# relies on the IRI escape mechanism to carry literal separator
	# characters, and structure() performs the final unescaping.
	raw = ri.parse(s, fieldproc = str)
	return structure(raw, fieldproc = fieldproc)
def serialize(x, obscure_password = False):
	"""
	Render a clientparams dictionary as a Postgres IRI string.
	"""
	parts = construct(x, obscure_password = obscure_password)
	return ri.unsplit(parts)
if __name__ == '__main__':
	# Diagnostic entry point: parse each command-line argument as a PQ-IRI
	# and show the round trip (source -> parsed dict -> re-serialized IRI).
	import sys
	for x in sys.argv[1:]:
		print("{src} -> {parsed!r} -> {serial}".format(
			src = x,
			parsed = parse(x),
			serial = serialize(parse(x))
		))
### About
py-postgresql is a Python 3 package providing modules for working with PostgreSQL.
Primarily, a high-level driver for querying databases.
For a high performance async interface, MagicStack's asyncpg
http://github.com/MagicStack/asyncpg should be considered.
py-postgresql, currently, does not have direct support for high-level async
interfaces provided by recent versions of Python. Future versions may change this.
### Advisory
In v1.3, `postgresql.driver.dbapi20.connect` will now raise `ClientCannotConnectError` directly.
Exception traps around connect should still function, but the `__context__` attribute
on the error instance will be `None` in the usual failure case as it is no longer
incorrectly chained. Trapping `ClientCannotConnectError` ahead of `Error` should
allow both cases to co-exist in the event that data is being extracted from
the `ClientCannotConnectError`.
In v2.0, support for older versions of PostgreSQL and Python will be removed.
If you have automated installations using PyPI, make sure that they specify a major version.
### Installation
Using PyPI.org:
$ pip install py-postgresql
From a clone:
$ git clone https://github.com/python-postgres/fe.git
$ cd fe
$ python3 ./setup.py install # Or use in-place without installation(PYTHONPATH).
### Basic Usage
```python
import postgresql
db = postgresql.open('pq://user:password@host:port/database')
get_table = db.prepare("SELECT * from information_schema.tables WHERE table_name = $1")
print(get_table("tables"))
# Streaming, in a transaction.
with db.xact():
for x in get_table.rows("tables"):
print(x)
```
### Documentation
http://py-postgresql.readthedocs.io
### Related
- http://postgresql.org
- http://python.org
+51
-50
Metadata-Version: 2.1
Name: py-postgresql
Version: 1.2.2
Version: 1.3.0
Summary: PostgreSQL driver and tools library.

@@ -10,51 +10,2 @@ Home-page: http://github.com/python-postgres/fe

Maintainer-email: james.pye@gmail.com
License: UNKNOWN
Description:
.. warning::
In v1.3, `postgresql.driver.dbapi20.connect` will now raise `ClientCannotConnectError` directly.
Exception traps around connect should still function, but the `__context__` attribute
on the error instance will be `None` in the usual failure case as it is no longer
incorrectly chained. Trapping `ClientCannotConnectError` ahead of `Error` should
allow both cases to co-exist in the event that data is being extracted from
the `ClientCannotConnectError`.
py-postgresql is a set of Python modules providing interfaces to various parts
of PostgreSQL. Primarily, it provides a pure-Python driver with some C optimizations for
querying a PostgreSQL database.
http://github.com/python-postgres/fe
Features:
* Prepared Statement driven interfaces.
* Cluster tools for creating and controlling a cluster.
* Support for most PostgreSQL types: composites, arrays, numeric, lots more.
* COPY support.
Sample PG-API Code::
>>> import postgresql
>>> db = postgresql.open('pq://user:password@host:port/database')
>>> db.execute("CREATE TABLE emp (emp_first_name text, emp_last_name text, emp_salary numeric)")
>>> make_emp = db.prepare("INSERT INTO emp VALUES ($1, $2, $3)")
>>> make_emp("John", "Doe", "75,322")
>>> with db.xact():
... make_emp("Jane", "Doe", "75,322")
... make_emp("Edward", "Johnson", "82,744")
...
There is a DB-API 2.0 module as well::
postgresql.driver.dbapi20
However, PG-API is recommended as it provides greater utility.
Once installed, try out the ``pg_python`` console script::
$ python3 -m postgresql.bin.pg_python -h localhost -p port -U theuser -d database_name
If a successful connection is made to the remote host, it will provide a Python
console with the database connection bound to the `db` name.
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable

@@ -71,2 +22,52 @@ Classifier: Intended Audience :: Developers

Classifier: Topic :: Database
Requires-Python: >=3.8
Description-Content-Type: text/x-rst
License-File: LICENSE
License-File: AUTHORS
.. warning::
In v1.3, `postgresql.driver.dbapi20.connect` will now raise `ClientCannotConnectError` directly.
Exception traps around connect should still function, but the `__context__` attribute
on the error instance will be `None` in the usual failure case as it is no longer
incorrectly chained. Trapping `ClientCannotConnectError` ahead of `Error` should
allow both cases to co-exist in the event that data is being extracted from
the `ClientCannotConnectError`.
py-postgresql is a set of Python modules providing interfaces to various parts
of PostgreSQL. Primarily, it provides a pure-Python driver with some C optimizations for
querying a PostgreSQL database.
http://github.com/python-postgres/fe
Features:
* Prepared Statement driven interfaces.
* Cluster tools for creating and controlling a cluster.
* Support for most PostgreSQL types: composites, arrays, numeric, lots more.
* COPY support.
Sample PG-API Code::
>>> import postgresql
>>> db = postgresql.open('pq://user:password@host:port/database')
>>> db.execute("CREATE TABLE emp (emp_first_name text, emp_last_name text, emp_salary numeric)")
>>> make_emp = db.prepare("INSERT INTO emp VALUES ($1, $2, $3)")
>>> make_emp("John", "Doe", "75,322")
>>> with db.xact():
... make_emp("Jane", "Doe", "75,322")
... make_emp("Edward", "Johnson", "82,744")
...
There is a DB-API 2.0 module as well::
postgresql.driver.dbapi20
However, PG-API is recommended as it provides greater utility.
Once installed, try out the ``pg_python`` console script::
$ python3 -m postgresql.bin.pg_python -h localhost -p port -U theuser -d database_name
If a successful connection is made to the remote host, it will provide a Python
console with the database connection bound to the `db` name.

@@ -14,3 +14,3 @@ ##

"""
import collections
import collections.abc
import abc

@@ -129,3 +129,4 @@

"""
Close the Result handle.
Close the Result discarding any supporting resources and causing
future read operations to emit empty record sets.
"""

@@ -207,14 +208,8 @@

class Chunks(
Result,
collections.Iterator,
collections.Iterable,
):
@collections.abc.Iterator.register
class Chunks(Result):
pass
class Cursor(
Result,
collections.Iterator,
collections.Iterable,
):
@collections.abc.Iterator.register
class Cursor(Result):
"""

@@ -265,6 +260,3 @@ A `Cursor` object is an interface to a sequence of tuples(rows). A result

@abc.abstractmethod
def read(self,
quantity : "Number of rows to read" = None,
direction : "Direction to fetch in, defaults to `self.direction`" = None,
) -> ["Row"]:
def read(self, quantity = None, direction = None) -> ["Row"]:
"""

@@ -319,3 +311,3 @@ Read, fetch, the specified number of rows and return them in a list.

@abc.abstractmethod
def __call__(self, *parameters : "Positional Parameters") -> ["Row"]:
def __call__(self, *parameters) -> ["Row"]:
"""

@@ -332,3 +324,3 @@ Execute the prepared statement with the given arguments as parameters.

@abc.abstractmethod
def column(self, *parameters) -> collections.Iterable:
def column(self, *parameters) -> collections.abc.Iterable:
"""

@@ -354,3 +346,3 @@ Return an iterator producing the values of first column of the

@abc.abstractmethod
def chunks(self, *parameters) -> collections.Iterable:
def chunks(self, *parameters) -> collections.abc.Iterable:
"""

@@ -369,3 +361,3 @@ Return an iterator producing sequences of rows produced by the cursor

be filled in. The rows contained in the sequences are only required to
support the basic `collections.Sequence` interfaces; simple and quick
support the basic `collections.abc.Sequence` interfaces; simple and quick
sequence types should be used.

@@ -375,3 +367,3 @@ """

@abc.abstractmethod
def rows(self, *parameters) -> collections.Iterable:
def rows(self, *parameters) -> collections.abc.Iterable:
"""

@@ -394,3 +386,3 @@ Return an iterator producing rows produced by the cursor

@abc.abstractmethod
def column(self, *parameters) -> collections.Iterable:
def column(self, *parameters) -> collections.abc.Iterable:
"""

@@ -420,3 +412,3 @@ Return an iterator producing the values of the first column in

@abc.abstractmethod
def first(self, *parameters) -> "'First' object that is returned by the query":
def first(self, *parameters):
"""

@@ -440,5 +432,3 @@ Execute the prepared statement with the given arguments as parameters.

@abc.abstractmethod
def load_rows(self,
iterable : "A iterable of tuples to execute the statement with"
):
def load_rows(self, iterable):
"""

@@ -460,5 +450,3 @@ Given an iterable, `iterable`, feed the produced parameters to the

@abc.abstractmethod
def load_chunks(self,
iterable : "A iterable of chunks of tuples to execute the statement with"
):
def load_chunks(self, iterable):
"""

@@ -481,7 +469,6 @@ Given an iterable, `iterable`, feed the produced parameters of the chunks

class Statement(
Element,
collections.Callable,
collections.Iterable,
):
@collections.abc.Iterator.register
@collections.abc.Callable.register
@Execution.register
class Statement(Element):
"""

@@ -612,9 +599,6 @@ Instances of `Statement` are returned by the `prepare` method of

"""
Execution.register(Statement)
PreparedStatement = Statement
class StoredProcedure(
Element,
collections.Callable,
):
@collections.abc.Callable.register
class StoredProcedure(Element):
"""

@@ -627,3 +611,3 @@ A function stored on the database.

@abc.abstractmethod
def __call__(self, *args, **kw) -> (object, Cursor, collections.Iterable):
def __call__(self, *args, **kw) -> (object, Cursor, collections.abc.Iterable):
"""

@@ -778,6 +762,4 @@ Execute the procedure with the given arguments. If keyword arguments are

class Settings(
Element,
collections.MutableMapping
):
@collections.abc.MutableMapping.register
class Settings(Element):
"""

@@ -901,6 +883,3 @@ A mapping interface to the session's settings. This provides a direct

@abc.abstractmethod
def xact(self,
isolation : "ISOLATION LEVEL to use with the transaction" = None,
mode : "Mode of the transaction, READ ONLY or READ WRITE" = None,
) -> Transaction:
def xact(self, isolation = None, mode = None) -> Transaction:
"""

@@ -947,6 +926,11 @@ Create a `Transaction` object using the given keyword arguments as its

@abc.abstractmethod
def statement_from_id(self,
statement_id : "The statement's identification string.",
) -> Statement:
def query(self, sql : str, *args) -> Execution:
"""
Prepare and execute the statement, `sql`, with the given arguments.
Equivalent to ``db.prepare(sql)(*args)``.
"""
@abc.abstractmethod
def statement_from_id(self, statement_id) -> Statement:
"""
Create a `Statement` object that was already prepared on the

@@ -960,5 +944,3 @@ server. The distinction between this and a regular query is that it

@abc.abstractmethod
def cursor_from_id(self,
cursor_id : "The cursor's identification string."
) -> Cursor:
def cursor_from_id(self, cursor_id) -> Cursor:
"""

@@ -976,6 +958,3 @@ Create a `Cursor` object from the given `cursor_id` that was already

@abc.abstractmethod
def proc(self,
procedure_id : \
"The procedure identifier; a valid ``regprocedure`` or Oid."
) -> StoredProcedure:
def proc(self, procedure_id) -> StoredProcedure:
"""

@@ -1054,3 +1033,3 @@ Create a `StoredProcedure` instance using the given identifier.

@abc.abstractmethod
def iternotifies(self, timeout = None) -> collections.Iterator:
def iternotifies(self, timeout = None) -> collections.abc.Iterator:
"""

@@ -1121,3 +1100,3 @@ Return an iterator to the notifications received by the connection. The

@abc.abstractmethod
def socket_secure(self, socket : "socket object") -> "secured socket":
def socket_secure(self, socket):
"""

@@ -1132,3 +1111,3 @@ Return a reference to the secured socket using the given parameters.

@abc.abstractmethod
def socket_factory_sequence(self) -> [collections.Callable]:
def socket_factory_sequence(self) -> [collections.abc.Callable]:
"""

@@ -1172,3 +1151,3 @@ Return a sequence of `SocketCreator`s that `Connection` objects will use to

def __init__(self,
user : "required keyword specifying the user name(str)" = None,
user : str = None,
password : str = None,

@@ -1209,11 +1188,2 @@ database : str = None,

@abc.abstractmethod
def query(self) -> Execution:
"""
The :py:class:`Execution` instance providing a one-shot query interface::
connection.query.<method>(sql, *parameters) == connection.prepare(sql).<method>(*parameters)
"""
@property
@abc.abstractmethod
def closed(self) -> bool:

@@ -1346,14 +1316,9 @@ """

def init(self,
initdb : "path to the initdb to use" = None,
user : "name of the cluster's superuser" = None,
password : "superuser's password" = None,
encoding : "the encoding to use for the cluster" = None,
locale : "the locale to use for the cluster" = None,
collate : "the collation to use for the cluster" = None,
ctype : "the ctype to use for the cluster" = None,
monetary : "the monetary to use for the cluster" = None,
numeric : "the numeric to use for the cluster" = None,
time : "the time to use for the cluster" = None,
text_search_config : "default text search configuration" = None,
xlogdir : "location for the transaction log directory" = None,
initdb = None,
user = None, password = None,
encoding = None, locale = None,
collate = None, ctype = None,
monetary = None, numeric = None, time = None,
text_search_config = None,
xlogdir = None,
):

@@ -1396,5 +1361,3 @@ """

@abc.abstractmethod
def wait_until_started(self,
timeout : "maximum time to wait" = 10
):
def wait_until_started(self, timeout = 10):
"""

@@ -1410,5 +1373,3 @@ After the start() method is ran, the database may not be ready for use.

@abc.abstractmethod
def wait_until_stopped(self,
timeout : "maximum time to wait" = 10
):
def wait_until_stopped(self, timeout = 10):
"""

@@ -1448,3 +1409,1 @@ After the stop() method is ran, the database may still be running.

help(__package__ + '.api')
##
# vim: ts=3:sw=3:noet:

@@ -137,3 +137,1 @@ ##

sys.exit(command(sys.argv))
##
# vim: ts=3:sw=3:noet:

@@ -23,3 +23,3 @@ ##

>>> normal_params = {
'user' : 'jwp',
'user' : 'dbusername',
'host' : 'localhost',

@@ -509,3 +509,3 @@ 'settings' : {'default_statistics_target' : 200, 'search_path' : 'home,public'}

def x_pg_ldap(ldap_url, config):
raise NotImplementedError("cannot resolve ldap URLs: " + str(ldap_url))
raise Exception("cannot resolve ldap URLs")

@@ -585,11 +585,11 @@ default_x_callbacks = {

def collect(
parsed_options = None,
no_defaults = False,
environ = os.environ,
environ_prefix = 'PG',
default_pg_sysconfdir = None,
pg_service_file = None,
prompt_title = '',
parameters = (),
):
parsed_options = None,
no_defaults = False,
environ = os.environ,
environ_prefix = 'PG',
default_pg_sysconfdir = None,
pg_service_file = None,
prompt_title = '',
parameters = (),
):
"""

@@ -596,0 +596,0 @@ Build a normalized client parameters dictionary for use with a connection

@@ -43,19 +43,31 @@ ##

class ClusterInitializationError(ClusterError):
"General cluster initialization failure"
"""
General cluster initialization failure.
"""
code = '-Cini'
class InitDBError(ClusterInitializationError):
"A non-zero result was returned by the initdb command"
"""
A non-zero result was returned by the initdb command.
"""
code = '-Cidb'
class ClusterStartupError(ClusterError):
"Cluster startup failed"
"""
Cluster startup failed.
"""
code = '-Cbot'
class ClusterNotRunningError(ClusterError):
"Cluster is not running"
"""
Cluster is not running.
"""
code = '-Cdwn'
class ClusterTimeoutError(ClusterError):
"Cluster operation timed out"
"""
Cluster operation timed out.
"""
code = '-Cout'
class ClusterWarning(pg_exc.Warning):
"Warning issued by cluster operations"
"""
Warning issued by cluster operations.
"""
code = '-Cwrn'

@@ -158,6 +170,3 @@ source = 'CLUSTER'

def __init__(self,
installation,
data_directory,
):
def __init__(self, installation, data_directory):
self.installation = installation

@@ -195,7 +204,3 @@ self.data_directory = os.path.abspath(data_directory)

def init(self,
password = None,
timeout = None,
**kw
):
def init(self, password = None, timeout = None, **kw):
"""

@@ -329,6 +334,3 @@ Create the cluster at the given `data_directory` using the

def start(self,
logfile = None,
settings = None
):
def start(self, logfile = None, settings = None):
"""

@@ -569,6 +571,3 @@ Start the cluster.

def wait_until_started(self,
timeout = 10,
delay = 0.05,
):
def wait_until_started(self, timeout = 10, delay = 0.05):
"""

@@ -622,6 +621,3 @@ After the `start` method is used, this can be ran in order to block

def wait_until_stopped(self,
timeout = 10,
delay = 0.05
):
def wait_until_stopped(self, timeout = 10, delay = 0.05):
"""

@@ -648,3 +644,1 @@ After the `stop` method is used, this can be ran in order to block until

time.sleep(delay)
##
# vim: ts=3:sw=3:noet:
##
# .configfile
##
'PostgreSQL configuration file parser and editor functions.'
"""
PostgreSQL configuration file parser and editor functions.
"""
import sys

@@ -79,3 +81,5 @@ import os

def write_config(map, writer, keys = None):
'A configuration writer that will trample & merely write the settings'
"""
A configuration writer that will trample & merely write the settings.
"""
if keys is None:

@@ -86,8 +90,6 @@ keys = map

def alter_config(
map : "the configuration changes to make",
fo : "file object containing configuration lines(Iterable)",
keys : "the keys to change; defaults to map.keys()" = None
):
'Alters a configuration file without trampling on the existing structure'
def alter_config(map, fo, keys = None):
"""
Alters a configuration file without trampling on the existing structure.
"""
if keys is None:

@@ -217,3 +219,3 @@ keys = list(map.keys())

Every action will cause the file to be wholly read, so using `update` to make
Every operation will cause the file to be wholly read, so using `update` to make
multiple changes is desirable.

@@ -321,3 +323,1 @@ """

return cfg
##
# vim: ts=3:sw=3:noet:

@@ -12,3 +12,3 @@ ##

from abc import abstractmethod, abstractproperty
from collections import Iterator
from collections.abc import Iterator
from .python.element import Element, ElementSet

@@ -447,3 +447,3 @@ from .python.structlib import ulong_unpack, ulong_pack

def __init__(self,
recv_into : "callable taking writable buffer and size",
recv_into,
buffer_size = default_buffer_size

@@ -450,0 +450,0 @@ ):

@@ -89,7 +89,7 @@ Commands

$ python3 -m postgresql.bin.pg_python -h localhost -W -m timeit "prepare('SELECT 1').first()"
Password for pg_python[pq://jwp@localhost:5432]:
Password for pg_python[pq://dbusername@localhost:5432]:
1000 loops, best of 3: 1.35 msec per loop
$ python3 -m postgresql.bin.pg_python -h localhost -W -m timeit -s "ps=prepare('SELECT 1')" "ps.first()"
Password for pg_python[pq://jwp@localhost:5432]:
Password for pg_python[pq://dbusername@localhost:5432]:
1000 loops, best of 3: 442 usec per loop

@@ -100,3 +100,3 @@

$ python3 -m postgresql.bin.pg_python -h localhost -W
Password for pg_python[pq://jwp@localhost:5432]:
Password for pg_python[pq://dbusername@localhost:5432]:
>>> ps = prepare('select 1')

@@ -147,9 +147,9 @@ >>> ps.first()

$ echo "setting = value" >pg.conf
# change 'setting'
$ python3 -m postgresql.bin.pg_dotconf pg.conf setting=newvalue
$ cat pg.conf
setting = 'newvalue'
# new settings are appended to the file

@@ -160,6 +160,6 @@ $ python3 -m postgresql.bin.pg_dotconf pg.conf another_setting=value

another_setting = 'value'
# comment a setting
$ python3 -m postgresql.bin.pg_dotconf pg.conf another_setting
$ cat pg.conf

@@ -166,0 +166,0 @@ setting = 'newvalue'

@@ -71,3 +71,3 @@ Client Parameters

Environment variables to extract client parameter variables from.
Defaults to `os.environ` and expects a `collections.Mapping` interface.
Defaults to `os.environ` and expects a `collections.abc.Mapping` interface.

@@ -104,5 +104,5 @@ ``environ_prefix``

>>> p = pg_param.collect(prompt_title = 'my_prompt!', parameters = {'prompt_password':True})
Password for my_prompt![pq://jwp@localhost:5432]:
Password for my_prompt![pq://dbusername@localhost:5432]:
>>> p
{'host': 'localhost', 'user': 'jwp', 'password': 'secret', 'port': 5432}
{'host': 'localhost', 'user': 'dbusername', 'password': 'secret', 'port': 5432}

@@ -114,3 +114,3 @@ If `None`, it will leave the necessary password resolution information in the

>>> p
{'pgpassfile': '/Users/jwp/.pgpass', 'prompt_password': True, 'host': 'localhost', 'user': 'jwp', 'port': 5432}
{'pgpassfile': '/home/{USER}/.pgpass', 'prompt_password': True, 'host': 'localhost', 'user': 'dbusername', 'port': 5432}

@@ -124,5 +124,5 @@ Of course, ``'prompt_password'`` is normally specified when ``parsed_options``

>>> p=pg_param.collect(parsed_options = co)
Password for [pq://jwp@localhost:5432]:
Password for [pq://dbusername@localhost:5432]:
>>> p
{'host': 'localhost', 'user': 'jwp', 'password': 'secret', 'port': 5432}
{'host': 'localhost', 'user': 'dbusername', 'password': 'secret', 'port': 5432}
>>>

@@ -173,6 +173,6 @@

>>> p
{'pgpassfile': '/Users/jwp/.pgpass', 'host': 'localhost', 'user': 'jwp', 'port': 5432}
{'pgpassfile': '/home/{USER}/.pgpass', 'host': 'localhost', 'user': 'dbusername', 'port': 5432}
>>> pg_param.resolve_password(p)
>>> p
{'host': 'localhost', 'password': 'secret', 'user': 'jwp', 'port': 5432}
{'host': 'localhost', 'password': 'secret', 'user': 'dbusername', 'port': 5432}

@@ -179,0 +179,0 @@

@@ -351,3 +351,3 @@ .. _cluster_management:

``Cluster.settings``
A `collections.Mapping` interface to the ``postgresql.conf`` file of the
A `collections.abc.Mapping` interface to the ``postgresql.conf`` file of the
cluster.

@@ -354,0 +354,0 @@

@@ -263,3 +263,3 @@ .. _pg_copyman:

``postgresql.copyman.IteratorProducer(collections.Iterator)``
``postgresql.copyman.IteratorProducer(collections.abc.Iterator)``
Given an Iterator producing *chunks* of COPY lines, construct a Producer to

@@ -266,0 +266,0 @@ manage the data coming from the iterator.

@@ -8,10 +8,2 @@ Gotchas

Non-English Locales
-------------------
Many non-english locales are not supported due to the localization of the severity field
in messages and errors sent to the client. Internally, py-postgresql uses this to allow
client side filtering of messages and to identify FATAL connection errors that allow the
client to recognize that it should be expecting the connection to terminate.
Thread Safety

@@ -116,1 +108,9 @@ -------------

items.
Non-English Locales
-------------------
In the past, some builds of PostgreSQL localized the severity field of some protocol messages.
`py-postgresql` expects these fields to be consistent with their english terms. If the driver
raises strange exceptions during the use of non-english locales, it may be necessary to use an
english setting in order to coax the server into issuing familiar terms.

@@ -17,8 +17,7 @@ py-postgresql

driver
copyman
clientparameters
cluster
notifyman
alock
cluster
lib
clientparameters
copyman
gotchas

@@ -41,2 +40,3 @@

changes-v1.3
changes-v1.2

@@ -43,0 +43,0 @@ changes-v1.1

@@ -23,3 +23,3 @@ .. _notifyman:

messages to come in on a set of connections, pick up the messages, and deliver
the messages to the object's user via the `collections.Iterator` protocol.
the messages to the object's user via the `collections.abc.Iterator` protocol.

@@ -26,0 +26,0 @@

@@ -328,3 +328,3 @@ ##

# Explicitly manage DB-API connected state to properly
# throw the already closed error. This will be active in 1.3.
# throw the already closed error.
_dbapi_connected_flag = False

@@ -365,3 +365,3 @@

def close(self):
if self.closed:# and self._dbapi_connected_flag:
if self.closed and self._dbapi_connected_flag:
raise Error(

@@ -368,0 +368,0 @@ "connection already closed",

@@ -39,10 +39,16 @@ ##

class Exception(Exception):
'Base PostgreSQL exception class'
"""
Base PostgreSQL exception class.
"""
pass
class LoadError(Exception):
'Failed to load a library'
"""
Failed to load a library.
"""
class Disconnection(Exception):
'Exception identifying errors that result in disconnection'
"""
Exception identifying errors that result in disconnection.
"""

@@ -84,3 +90,5 @@ class Warning(Message):

class Error(Message, Exception):
'A PostgreSQL Error'
"""
A PostgreSQL Error.
"""
_e_label = 'ERROR'

@@ -90,3 +98,5 @@ code = ''

def __str__(self):
'Call .sys.errformat(self)'
"""
Call .sys.errformat(self).
"""
return pg_sys.errformat(self)

@@ -100,3 +110,5 @@

class DriverError(Error):
"Errors originating in the driver's implementation."
"""
Errors originating in the driver's implementation.
"""
source = 'CLIENT'

@@ -116,3 +128,5 @@ code = '--000'

class ConnectTimeoutError(DriverError, Disconnection):
'Client was unable to esablish a connection in the given time'
"""
Client was unable to establish a connection in the given time.
"""
code = '--TOE'

@@ -152,3 +166,5 @@

class ConnectionFailureError(ConnectionError):
'Raised when a connection is dropped'
"""
Raised when a connection is dropped.
"""
code = '08006'

@@ -173,3 +189,5 @@

class FeatureError(Error):
"Unsupported feature"
"""
"Unsupported feature.
"""
code = '0A000'

@@ -197,3 +215,5 @@

class CardinalityError(Error):
"Wrong number of rows returned"
"""
Wrong number of rows returned.
"""
code = '21000'

@@ -208,3 +228,5 @@

class DPDSEError(Error):
"Dependent Privilege Descriptors Still Exist"
"""
Dependent Privilege Descriptors Still Exist.
"""
code = '2B000'

@@ -215,3 +237,5 @@ class DPDSEObjectError(DPDSEError):

class SREError(Error):
"SQL Routine Exception"
"""
SQL Routine Exception.
"""
code = '2F000'

@@ -228,3 +252,5 @@ class FunctionExecutedNoReturnStatementError(SREError):

class EREError(Error):
"External Routine Exception"
"""
External Routine Exception.
"""
code = '38000'

@@ -241,3 +267,5 @@ class ContainingSQLNotPermittedError(EREError):

class ERIEError(Error):
"External Routine Invocation Exception"
"""
External Routine Invocation Exception.
"""
code = '39000'

@@ -254,3 +282,5 @@ class InvalidSQLState(ERIEError):

class TRError(TransactionError):
"Transaction Rollback"
"""
Transaction Rollback.
"""
code = '40000'

@@ -268,3 +298,5 @@ class DeadlockError(TRError):

class ITSError(TransactionError):
"Invalid Transaction State"
"""
Invalid Transaction State.
"""
code = '25000'

@@ -282,9 +314,15 @@ class ActiveTransactionError(ITSError):

class ReadOnlyTransactionError(ITSError):
"Occurs when an alteration occurs in a read-only transaction."
"""
Occurs when an alteration occurs in a read-only transaction.
"""
code = '25006'
class SchemaAndDataStatementsError(ITSError):
"Mixed schema and data statements not allowed."
"""
Mixed schema and data statements not allowed.
"""
code = '25007'
class InconsistentCursorIsolationError(ITSError):
"The held cursor requires the same isolation."
"""
The held cursor requires the same isolation.
"""
code = '25008'

@@ -295,3 +333,5 @@

class InFailedTransactionError(ITSError):
"Occurs when an action occurs in a failed transaction."
"""
Occurs when an action occurs in a failed transaction.
"""
code = '25P02'

@@ -301,3 +341,5 @@

class SavepointError(TransactionError):
"Classification error designating errors that relate to savepoints."
"""
Classification error designating errors that relate to savepoints.
"""
code = '3B000'

@@ -311,3 +353,5 @@ class InvalidSavepointSpecificationError(SavepointError):

class IRError(Error):
"Insufficient Resource Error"
"""
Insufficient Resource Error.
"""
code = '53000'

@@ -322,3 +366,5 @@ class MemoryError(IRError, MemoryError):

class PLEError(OverflowError):
"Program Limit Exceeded"
"""
Program Limit Exceeded.
"""
code = '54000'

@@ -333,3 +379,5 @@ class ComplexityOverflowError(PLEError):

class ONIPSError(Error):
"Object Not In Prerequisite State"
"""
Object Not In Prerequisite State.
"""
code = '55000'

@@ -345,3 +393,5 @@ class ObjectInUseError(ONIPSError):

class SEARVError(Error):
"Syntax Error or Access Rule Violation"
"""
Syntax Error or Access Rule Violation.
"""
code = '42000'

@@ -469,3 +519,5 @@

class ICVError(Error):
"Integrity Constraint Violation"
"""
Integrity Constraint Violation.
"""
code = '23000'

@@ -564,3 +616,5 @@ class RestrictError(ICVError):

class EscapeCharacterError(DataError):
"Invalid escape character"
"""
Invalid escape character.
"""
code = '2200C'

@@ -599,3 +653,5 @@

class SIOError(Error):
"System I/O"
"""
System I/O.
"""
code = '58000'

@@ -608,3 +664,5 @@ class UndefinedFileError(SIOError):

class CFError(Error):
"Configuration File Error"
"""
Configuration File Error.
"""
code = 'F0000'

@@ -615,3 +673,5 @@ class LockFileExistsError(CFError):

class OIError(Error):
"Operator Intervention"
"""
Operator Intervention.
"""
code = '57000'

@@ -625,10 +685,16 @@ class QueryCanceledError(OIError):

class ServerNotReadyError(OIError, Disconnection):
'Thrown when a connection is established to a server that is still starting up.'
"""
Thrown when a connection is established to a server that is still starting up.
"""
code = '57P03'
class PLPGSQLError(Error):
"Error raised by a PL/PgSQL procedural function"
"""
Error raised by a PL/PgSQL procedural function.
"""
code = 'P0000'
class PLPGSQLRaiseError(PLPGSQLError):
"Error raised by a PL/PgSQL RAISE statement."
"""
Error raised by a PL/PgSQL RAISE statement.
"""
code = 'P0001'

@@ -645,5 +711,5 @@ class PLPGSQLNoDataFoundError(PLPGSQLError):

def map_errors_and_warnings(
objs : "An iterable of `Warning`s and `Error`s",
error_container : "apply the code to error association to this object" = code_to_error,
warning_container : "apply the code to warning association to this object" = code_to_warning,
objs,
error_container = code_to_error,
warning_container = code_to_warning,
):

@@ -686,5 +752,5 @@ """

def code_lookup(
default : "The object to return when no code or class is found",
container : "where to look for the object associated with the code",
code : "the code to find the exception for"
default,
container,
code
):

@@ -714,3 +780,1 @@ obj = container.get(code)

)
##
# vim: ts=3:sw=3:noet:

@@ -9,3 +9,3 @@ ##

pq://user:pass@host:port/database?setting=value&setting2=value2#public,othernamespace
pq://user:pass@host:port/database?setting=value&setting2=value2

@@ -18,3 +18,3 @@ IPv6 is supported via the standard representation::

pq://user@host/?[driver_param]=value&[other_param]=value?setting=val
pq://user@host/?[driver_param]=value&[other_param]=value?server_setting=val
"""

@@ -32,5 +32,7 @@ from .resolved import riparse as ri

def structure(d, fieldproc = ri.unescape):
'Create a clientparams dictionary from a parsed RI'
if d.get('scheme', 'pq').lower() != 'pq':
raise ValueError("PQ-IRI scheme is not 'pq'")
"""
Create a clientparams dictionary from a parsed RI.
"""
if d.get('scheme', 'pq').lower() not in {'pq', 'postgres', 'postgresql'}:
raise ValueError("PQ-IRI scheme is not 'pq', 'postgres', or 'postgresql'")
cpd = {

@@ -96,3 +98,5 @@ k : fieldproc(v) for k, v in d.items()

def construct(x, obscure_password = False):
'Construct a RI dictionary from a clientparams dictionary'
"""
Construct a RI dictionary from a clientparams dictionary.
"""
# the rather exhaustive settings choreography is due to

@@ -174,3 +178,5 @@ # a desire to allow the search_path to be appended in the fragment

def parse(s, fieldproc = ri.unescape):
'Parse a Postgres IRI into a dictionary object'
"""
Parse a Postgres IRI into a dictionary object.
"""
return structure(

@@ -185,3 +191,5 @@ # In ri.parse, don't unescape the parsed values as our sub-structure

def serialize(x, obscure_password = False):
'Return a Postgres IRI from a dictionary object.'
"""
Return a Postgres IRI from a dictionary object.
"""
return ri.unsplit(construct(x, obscure_password = obscure_password))

@@ -188,0 +196,0 @@

@@ -67,7 +67,7 @@ ##

def __init__(self,
message : "The primary information of the message",
code : "Message code to attach (SQL state)" = None,
details : "additional information associated with the message" = {},
source : "Which side generated the message(SERVER, CLIENT)" = None,
creator : "The interface element that called for instantiation" = None,
message,
code = None,
details = {},
source = None,
creator = None,
):

@@ -74,0 +74,0 @@ self.message = message

@@ -109,3 +109,3 @@ ##

"""
Queue the notifies for the specified connection. Upon success, the
Queue the notifies for the specified connection.

@@ -190,3 +190,5 @@ This method can be overridden by subclasses to take a callback approach

def gettimeout(self):
'Get the timeout.'
"""
Get the timeout assigned by `settimeout`.
"""
return self.timeout

@@ -193,0 +195,0 @@

##
# .pgpassfile - parse and lookup passwords in a pgpassfile
##
'Parse pgpass files and subsequently lookup a password.'
"""
Parse pgpass files and subsequently lookup a password.
"""
import os.path

@@ -33,3 +35,5 @@

def parse(data):
'produce a list of [(word, (host,port,dbname,user))] from a pgpass file object'
"""
Produce a list of [(word, (host,port,dbname,user))] from a pgpass file object.
"""
return [

@@ -54,3 +58,5 @@ (x[-1], x[0:4]) for x in [split(line) for line in data] if x

def lookup_password_file(path, t):
'like lookup_password, but takes a file path'
"""
Like lookup_password, but takes a file path.
"""
with open(path) as f:

@@ -57,0 +63,0 @@ return lookup_password(parse(f), t)

@@ -627,4 +627,1 @@ /*

};
/*
* vim: ts=3:sw=3:noet:
*/

@@ -338,4 +338,1 @@ /*

}
/*
* vim: ts=3:sw=3:noet:
*/

@@ -152,4 +152,1 @@ /*

}
/*
* vim: ts=3:sw=3:noet:
*/

@@ -287,4 +287,1 @@ /*

};
/*
* vim: ts=3:sw=3:noet:
*/

@@ -11,3 +11,3 @@ """

version_info = (1, 2, 2)
version_info = (1, 3, 0)
version = '.'.join(map(str, version_info))

@@ -272,3 +272,5 @@ ##

def standard_read_messages(self):
'read more messages into self.read when self.read is empty'
"""
Read more messages into self.read when self.read is empty.
"""
r = True

@@ -318,3 +320,5 @@ if not self.read:

):
'protocol message writer'
"""
Protocol message writer.
"""
if self.writing is not self.written:

@@ -332,3 +336,5 @@ self.message_data += cat_messages(self.writing)

def traced_write_messages(self, messages):
'message_writer used when tracing'
"""
`message_writer` used when tracing.
"""
for msg in messages:

@@ -352,3 +358,5 @@ t = getattr(msg, 'type', None)

def traced_read_messages(self):
'message_reader used when tracing'
"""
`message_reader` used when tracing.
"""
r = self.standard_read_messages()

@@ -440,3 +448,5 @@ for msg in self.read:

def complete(self):
'complete the current transaction'
"""
Complete the current transaction.
"""
# Continue to transition until all transactions have been

@@ -443,0 +453,0 @@ # completed, or an exception occurs that does not signal retry.

@@ -138,4 +138,3 @@ ##

"""
An absolutely empty message. When serialized, it always yields an empty
string.
An absolutely empty message. When serialized, it always yields an empty string.
"""

@@ -150,3 +149,3 @@ type = b''

return b''
def __new__(typ, *args, **kw):

@@ -183,3 +182,5 @@ return VoidMessage

class EmptyMessage(Message):
'An abstract message that is always empty'
"""
An abstract message that is always empty.
"""
__slots__ = ()

@@ -201,3 +202,5 @@ type = b''

class Notify(Message):
'Asynchronous notification message'
"""
Asynchronous notification message.
"""
type = message_types[b'A'[0]]

@@ -223,4 +226,5 @@ __slots__ = ('pid', 'channel', 'payload',)

class ShowOption(Message):
"""ShowOption(name, value)
GUC variable information from backend"""
"""
GUC variable information from backend.
"""
type = message_types[b'S'[0]]

@@ -241,3 +245,5 @@ __slots__ = ('name', 'value')

class Complete(StringMessage):
'Command completion message.'
"""
Command completion message.
"""
type = message_types[b'C'[0]]

@@ -269,3 +275,5 @@ __slots__ = ()

class Null(EmptyMessage):
'Null command'
"""
Null command.
"""
type = message_types[b'I'[0]]

@@ -277,3 +285,5 @@ __slots__ = ()

class NoData(EmptyMessage):
'Null command'
"""
Null command.
"""
type = message_types[b'n'[0]]

@@ -285,3 +295,5 @@ __slots__ = ()

class ParseComplete(EmptyMessage):
'Parse reaction'
"""
Parse reaction.
"""
type = message_types[b'1'[0]]

@@ -293,3 +305,5 @@ __slots__ = ()

class BindComplete(EmptyMessage):
'Bind reaction'
"""
Bind reaction.
"""
type = message_types[b'2'[0]]

@@ -301,3 +315,5 @@ __slots__ = ()

class CloseComplete(EmptyMessage):
'Close statement or Portal'
"""
Close statement or Portal.
"""
type = message_types[b'3'[0]]

@@ -309,3 +325,5 @@ __slots__ = ()

class Suspension(EmptyMessage):
'Portal was suspended, more tuples for reading'
"""
Portal was suspended, more tuples for reading.
"""
type = message_types[b's'[0]]

@@ -874,3 +892,2 @@ __slots__ = ()

"""
type = message_types[b'F'[0]]

@@ -877,0 +894,0 @@ __slots__ = ('oid', 'aformats', 'arguments', 'rformat')

@@ -19,3 +19,6 @@ ##

class pq_message_stream(object):
'provide a message stream from a data stream'
"""
Provide a message stream from a data stream.
"""
_block = 512

@@ -28,3 +31,6 @@ _limit = _block * 4

def truncate(self):
"remove all data in the buffer"
"""
Remove all data in the buffer.
"""
self._strio.truncate(0)

@@ -34,3 +40,6 @@ self._start = 0

def _rtruncate(self, amt = None):
"[internal] remove the given amount of data"
"""
[internal] remove the given amount of data.
"""
strio = self._strio

@@ -64,3 +73,6 @@ if amt is None:

def has_message(self, xl_unpack = xl_unpack, len = len):
"if the buffer has a message available"
"""
Whether the buffer has a message available.
"""
strio = self._strio

@@ -78,3 +90,6 @@ strio.seek(self._start)

def __len__(self, xl_unpack = xl_unpack, len = len):
"number of messages in buffer"
"""
Number of messages in buffer.
"""
count = 0

@@ -81,0 +96,0 @@ rpos = self._start

##
# .protocol.xact3 - protocol state machine
##
'PQ version 3.0 client transactions'
"""
PQ version 3.0 client transactions.
"""
import sys

@@ -97,6 +99,3 @@ import os

def __init__(self,
startup_message : "startup message to send",
password : "password source data(encoded password bytes)",
):
def __init__(self, startup_message, password):
self.startup_message = startup_message

@@ -439,3 +438,5 @@ self.password = password

def messages_received(self):
'Received and validate messages'
"""
Received and validate messages.
"""
return chain.from_iterable(map(get1, self.completed))

@@ -442,0 +443,0 @@

@@ -636,3 +636,1 @@ ##

sys.exit(command())
##
# vim: ts=3:sw=3:noet:

@@ -10,3 +10,5 @@ ##

class RecursiveFactor(Exception):
'Raised when a factor is ultimately composed of itself'
"""
Raised when a factor is ultimately composed of itself.
"""
pass

@@ -100,3 +102,5 @@

def prime_factor(obj):
'get the primary factor on the `obj`, returns None if none.'
"""
Get the primary factor on the `obj`, returns None if none.
"""
f = getattr(obj, '_e_factors', None)

@@ -131,3 +135,5 @@ if f:

def format_element(obj, coverage = ()):
'format the given element with its factors and metadata into a readable string'
"""
Format the given element with its factors and metadata into a readable string.
"""
# if it's not an Element, all there is to return is str(obj)

@@ -134,0 +140,0 @@ if obj in coverage:

@@ -7,6 +7,6 @@ ##

"""
import collections
import collections.abc
from itertools import cycle, islice
def interlace(*iters, next = next) -> collections.Iterable:
def interlace(*iters, next = next) -> collections.abc.Iterable:
"""

@@ -13,0 +13,0 @@ interlace(i1, i2, ..., in) -> (

@@ -5,3 +5,3 @@ ##

"""
Release management code and project meta-data.
Release management code and project/release meta-data.
"""

@@ -120,4 +120,4 @@ ##

def prefixed_extensions(
prefix : "prefix to prepend to paths" = default_prefix,
extensions_data : "`extensions_data`" = extensions_data,
prefix = default_prefix,
extensions_data = extensions_data,
) -> [Extension]:

@@ -138,3 +138,3 @@ """

def prefixed_packages(
prefix : "prefix to prepend to source paths" = default_prefix,
prefix = default_prefix,
packages = subpackages,

@@ -152,3 +152,3 @@ ):

def prefixed_package_data(
prefix : "prefix to prepend to dictionary keys paths" = default_prefix,
prefix = default_prefix,
package_data = subpackage_data,

@@ -183,2 +183,3 @@ ):

'cmdclass': dict(test=TestCommand),
'python_requires': '>=3.8',
}

@@ -185,0 +186,0 @@ if build_extensions:

@@ -1,5 +0,1 @@

# -*- encoding: utf-8 -*-
##
# copyright 2008, James William Pye. http://jwp.name
##
"""

@@ -72,3 +68,5 @@ Split, unsplit, parse, serialize, construct and structure resource indicators.

def unescape(x, mkval = chr):
'Substitute percent escapes with literal characters'
"""
Substitute percent escapes with literal characters.
"""
nstr = type(x)('')

@@ -197,3 +195,5 @@ if isinstance(x, str):

def unsplit(t):
'Make a RI from a split RI(5-tuple)'
"""
Make a RI from a split RI(5-tuple).
"""
s = ''

@@ -270,3 +270,5 @@ if t[0] is not None:

def unsplit_netloc(t):
'Create a netloc fragment from the given tuple(user,password,host,port)'
"""
Create a netloc fragment from the given tuple(user,password,host,port).
"""
if t[0] is None and t[2] is None:

@@ -346,3 +348,5 @@ return None

def construct(x):
'Construct a RI tuple(5-tuple) from a dictionary object'
"""
Construct a RI tuple(5-tuple) from a dictionary object.
"""
p = x.get('path')

@@ -385,5 +389,7 @@ if p is not None:

def serialize(x):
'Return an RI from a dictionary object. Synonym for ``unsplit(construct(x))``'
"""
Return an RI from a dictionary object. Synonym for ``unsplit(construct(x))``.
"""
return unsplit(construct(x))
__docformat__ = 'reStructuredText'

@@ -19,11 +19,17 @@ ##

def escape_literal(text):
"Replace every instance of ' with ''"
"""
Replace every instance of ' with ''.
"""
return text.replace("'", "''")
def quote_literal(text):
"Escape the literal and wrap it in [single] quotations"
"""
Escape the literal and wrap it in [single] quotations.
"""
return "'" + text.replace("'", "''") + "'"
def escape_ident(text):
'Replace every instance of " with ""'
"""
Replace every instance of " with "".
"""
return text.replace('"', '""')

@@ -35,3 +41,5 @@

def quote_ident(text):
"Replace every instance of '"' with '""' *and* place '"' on each end"
"""
Replace every instance of '"' with '""' *and* place '"' on each end.
"""
return '"' + text.replace('"', '""') + '"'

@@ -57,3 +65,3 @@

numbered indexes are unquoted portions, while odd indexes are quoted
portions.
portions.

@@ -220,3 +228,5 @@ Unquoted portions are regular strings, whereas quoted portions are

def qname(*args):
"Quote the identifiers and join them using '.'"
"""
Quote the identifiers and join them using '.'.
"""
return '.'.join([quote_ident(x) for x in args])

@@ -223,0 +233,0 @@

@@ -33,3 +33,3 @@ ##

"""
Built-in error formatter. DON'T TOUCH!
Built-in error formatter. Do not change.
"""

@@ -89,3 +89,5 @@ it = val._e_metas()

def reset_errformat(with_func = errformat):
'restore the original excformat function'
"""
Restore the original excformat function.
"""
global errformat

@@ -95,4 +97,6 @@ errformat = with_func

def reset_msghook(with_func = msghook):
'restore the original msghook function'
"""
Restore the original msghook function.
"""
global msghook
msghook = with_func

@@ -32,4 +32,4 @@ ##

#: Format the cluster directory name.
cluster_dirname = 'pg_tmp_{0}_{1}'.format
format_sandbox_id = staticmethod(('sandbox{0}_{1}').format)
cluster_dirname = staticmethod(('pg_tmp_{0}_{1}').format)
cluster = None

@@ -95,3 +95,3 @@

):
if self.cluster is not None:
if self.cluster is not None or 'PGTEST' in os.environ:
return

@@ -161,3 +161,3 @@ ##

# Start it up.
# Start the database cluster.
with open(self.logfile, 'w') as lfo:

@@ -171,14 +171,19 @@ cluster.start(logfile = lfo)

c.execute('create database test')
# It's ready.
self.cluster = cluster
def push(self):
c = self.cluster.connection(user = 'test')
c.connect()
if 'PGTEST' in os.environ:
from . import open as pg_open
c = pg_open(os.environ['PGTEST']) # Ignoring PGINSTALLATION.
else:
c = self.cluster.connection(user = 'test')
c.connect()
extras = []
sbid = self.format_sandbox_id(os.getpid(), self.sandbox_id + 1)
def new_pg_tmp_connection(l = extras, c = c, sbid = 'sandbox' + str(self.sandbox_id + 1)):
def new_pg_tmp_connection(l = extras, clone = c.clone, sbid = sbid):
# Used to create a new connection that will be closed
# when the context stack is popped along with 'db'.
l.append(c.clone())
l.append(clone())
l[-1].settings['search_path'] = str(sbid) + ',' + l[-1].settings['search_path']

@@ -212,3 +217,3 @@ return l[-1]

def pop(self, exc, drop_schema = 'DROP SCHEMA sandbox{0} CASCADE'.format):
def pop(self, exc, drop_schema = ('DROP SCHEMA {0} CASCADE').format):
local_builtins, extras = self.builtins_stack.pop()

@@ -243,20 +248,25 @@ self.sandbox_id -= 1

# now remove the sandbox schema.
c = self.cluster.connection(user = 'test')
with c:
xdb = local_builtins['db']
with xdb.clone() as c:
# Use a new connection so that the state of
# the context connection will not have to be
# contended with.
c.execute(drop_schema(self.sandbox_id+1))
c.execute(drop_schema(self.format_sandbox_id(os.getpid(), self.sandbox_id + 1)))
else:
# interrupt
# interrupt exception; avoid waiting for close
pass
def _init_c(self, cxn):
cxn.connect()
sb = self.format_sandbox_id(os.getpid(), self.sandbox_id)
cxn.execute('CREATE SCHEMA ' + sb)
cxn.settings['search_path'] = ','.join((sb, cxn.settings['search_path']))
def __enter__(self):
if self.cluster is None:
self.init()
self.push()
try:
db.connect()
db.execute('CREATE SCHEMA sandbox' + str(self.sandbox_id))
db.settings['search_path'] = 'sandbox' + str(self.sandbox_id) + ',' + db.settings['search_path']
self._init_c(builtins.db)
except Exception as e:

@@ -268,6 +278,5 @@ # failed to initialize sandbox schema; pop it.

def __exit__(self, exc, val, tb):
if self.cluster is not None:
self.pop(val)
self.pop(val)
#: The process' temporary cluster.
#: The process' temporary cluster or connection source.
pg_tmp = Temporal()

@@ -58,3 +58,3 @@ ##

if thisread:
self.failUnlessEqual(
self.assertEqual(
last[0][-1][0], next[0][0][0] - 1,

@@ -67,4 +67,4 @@ "first row(-1) of next failed to match the last row of the previous"

break
self.failUnlessEqual(read, limit)
self.failUnlessEqual(list(range(-1, limit)), [
self.assertEqual(read, limit)
self.assertEqual(list(range(-1, limit)), [
x[0] for x in itertools.chain(*completed)

@@ -93,3 +93,3 @@ ])

if thisread:
self.failUnlessEqual(
self.assertEqual(
last[0][-1], next[0][0] - 1,

@@ -102,4 +102,4 @@ "first row(-1) of next failed to match the last row of the previous"

break
self.failUnlessEqual(read, limit)
self.failUnlessEqual(
self.assertEqual(read, limit)
self.assertEqual(
list(range(-1, limit)),

@@ -106,0 +106,0 @@ list(itertools.chain(*completed))

@@ -12,15 +12,12 @@ ##

default_install = installation.default()
if default_install is None:
sys.stderr.write("ERROR: cannot find 'default' pg_config\n")
sys.stderr.write("HINT: set the PGINSTALLATION environment variable to the `pg_config` path\n")
sys.exit(1)
default_installation = installation.default()
class test_cluster(unittest.TestCase):
def setUp(self):
self.cluster = Cluster(default_install, 'test_cluster',)
self.cluster = Cluster(default_installation, 'test_cluster',)
def tearDown(self):
self.cluster.drop()
self.cluster = None
if self.cluster.installation is not None:
self.cluster.drop()
self.cluster = None

@@ -50,2 +47,3 @@ def start_cluster(self, logfile = None):

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def testSilentMode(self):

@@ -71,2 +69,3 @@ self.init()

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def testSuperPassword(self):

@@ -87,4 +86,7 @@ self.init(

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def testNoParameters(self):
'simple init and drop'
"""
Simple init and drop.
"""
self.init()

@@ -91,0 +93,0 @@ self.start_cluster()

@@ -240,3 +240,3 @@ ##

)
def testSelection(self):

@@ -243,0 +243,0 @@ # Sanity

@@ -21,2 +21,3 @@ ##

default_installation = installation.default()

@@ -51,5 +52,6 @@ def check_for_ipv6():

"""
installation = default_installation
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.installation = installation.default()
self.cluster_path = \

@@ -59,9 +61,2 @@ 'pypg_test_' \

if self.installation is None:
sys.stderr.write("ERROR: cannot find 'default' pg_config\n")
sys.stderr.write(
"HINT: set the PGINSTALLATION environment variable to the `pg_config` path\n"
)
sys.exit(1)
self.cluster = pg_cluster.Cluster(

@@ -71,6 +66,9 @@ self.installation,

)
if self.cluster.initialized():
self.cluster.drop()
self.disable_replication = self.installation.version_info[:2] > (9, 6)
@property
def disable_replication(self):
"""
Whether replication settings should be disabled.
"""
return self.installation.version_info[:2] > (9, 6)

@@ -133,21 +131,29 @@ def configure_cluster(self):

def drop_cluster(self):
if self.cluster.initialized():
self.cluster.drop()
def run(self, *args, **kw):
if not self.cluster.initialized():
self.cluster.encoding = 'utf-8'
self.cluster.init(
user = 'test',
encoding = self.cluster.encoding,
logfile = None,
)
sys.stderr.write('*')
try:
atexit.register(self.cluster.drop)
self.configure_cluster()
self.cluster.start(logfile = sys.stdout)
self.cluster.wait_until_started()
self.initialize_database()
except Exception:
self.cluster.drop()
atexit.unregister(self.cluster.drop)
raise
if 'PGINSTALLATION' not in os.environ:
# Expect tests to show skipped.
return super().run(*args, **kw)
# From prior test run?
if self.cluster.initialized():
self.cluster.drop()
self.cluster.encoding = 'utf-8'
self.cluster.init(
user = 'test',
encoding = self.cluster.encoding,
logfile = None,
)
sys.stderr.write('*')
atexit.register(self.drop_cluster)
self.configure_cluster()
self.cluster.start(logfile = sys.stdout)
self.cluster.wait_until_started()
self.initialize_database()
if not self.cluster.running():

@@ -165,3 +171,3 @@ self.cluster.start()

"""
postgresql.driver connectivity tests
postgresql.driver connection tests
"""

@@ -188,6 +194,7 @@ ip6 = '::1'

super().__init__(*args,**kw)
# 8.4 nixed this.
vi = self.cluster.installation.version_info
self.check_crypt_user = (vi < (8,4))
@property
def check_crypt_user(self):
return (self.cluster.installation.version_info < (8,4))
def configure_cluster(self):

@@ -230,2 +237,3 @@ super().configure_cluster()

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_pg_open_SQL_ASCII(self):

@@ -243,2 +251,3 @@ # postgresql.open

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_pg_open_keywords(self):

@@ -285,2 +294,3 @@ host, port = self.cluster.address()

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_pg_open(self):

@@ -368,2 +378,3 @@ # postgresql.open

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_dbapi_connect(self):

@@ -424,2 +435,3 @@ host, port = self.cluster.address()

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_dbapi_connect_failure(self):

@@ -436,2 +448,3 @@ host, port = self.cluster.address()

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_IP4_connect(self):

@@ -448,14 +461,16 @@ C = pg_driver.default.ip4(

if has_ipv6:
def test_IP6_connect(self):
C = pg_driver.default.ip6(
user = 'test',
host = '::1',
database = 'test',
port = self.cluster.address()[1],
**self.params
)
with C() as c:
self.assertEqual(c.prepare('select 1').first(), 1)
@unittest.skipIf(default_installation is None, "no installation provided by environment")
@unittest.skipIf(not has_ipv6, "platform may not support IPv6")
def test_IP6_connect(self):
C = pg_driver.default.ip6(
user = 'test',
host = '::1',
database = 'test',
port = self.cluster.address()[1],
**self.params
)
with C() as c:
self.assertEqual(c.prepare('select 1').first(), 1)
@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_Host_connect(self):

@@ -472,2 +487,3 @@ C = pg_driver.default.host(

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_md5_connect(self):

@@ -483,2 +499,3 @@ c = self.cluster.connection(

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_crypt_connect(self):

@@ -495,2 +512,3 @@ if self.check_crypt_user:

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_password_connect(self):

@@ -505,2 +523,3 @@ c = self.cluster.connection(

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_trusted_connect(self):

@@ -516,2 +535,3 @@ c = self.cluster.connection(

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_Unix_connect(self):

@@ -532,2 +552,3 @@ if not has_unix_sock:

@unittest.skipIf(default_installation is None, "no installation provided by environment")
def test_pg_open_unix(self):

@@ -534,0 +555,0 @@ if not has_unix_sock:

@@ -96,23 +96,17 @@ ##

def setUp(self):
pg_tmp.init()
pg_tmp.push()
pg_tmp._init_c(db)
def tearDown(self):
con = self._connect()
try:
cur = con.cursor()
for ddl in (self.xddl1, self.xddl2):
try:
cur.execute(ddl)
con.commit()
except self.driver.Error:
# Assume table didn't exist. Other tests will check if
# execute is busted.
pass
finally:
con.close()
pg_tmp.pop(None)
def _connect(self):
pg_tmp.init()
host, port = pg_tmp.cluster.address()
return self.driver.connect(
user = 'test', host = host, port = port,
)
c = db.clone()
c.__class__ = self.driver.Connection
c._xact = c.xact()
c._xact.start()
c._dbapi_connected_flag = True
return c

@@ -712,3 +706,3 @@ def test_connect(self):

Should create a procedure called deleteme
that returns two result sets, first the
that returns two result sets, first the
number of rows in booze then "name from booze"

@@ -715,0 +709,0 @@ '''

@@ -541,8 +541,12 @@ ##

db.execute("CREATE TYPE public.myudt AS (i int)")
# Should be pg_temp or sandbox.
schema = db.settings['search_path'].split(',')[0]
typpath = '"%s"."myudt"' %(schema,)
db.execute("CREATE TYPE myudt AS (i int)")
myudt_oid = db.prepare("select oid from pg_type WHERE typname='myudt'").first()
ps = db.prepare("SELECT $1::text AS my_column1, $2::varchar AS my_column2, $3::public.myudt AS my_column3")
ps = db.prepare("SELECT $1::text AS my_column1, $2::varchar AS my_column2, $3::myudt AS my_column3")
self.assertEqual(tuple(ps.column_names), ('my_column1','my_column2', 'my_column3'))
self.assertEqual(tuple(ps.sql_column_types), ('pg_catalog.text', 'CHARACTER VARYING', '"public"."myudt"'))
self.assertEqual(tuple(ps.sql_parameter_types), ('pg_catalog.text', 'CHARACTER VARYING', '"public"."myudt"'))
self.assertEqual(tuple(ps.sql_column_types), ('pg_catalog.text', 'CHARACTER VARYING', typpath))
self.assertEqual(tuple(ps.sql_parameter_types), ('pg_catalog.text', 'CHARACTER VARYING', typpath))
self.assertEqual(tuple(ps.pg_column_types), (

@@ -558,3 +562,3 @@ pg_types.TEXTOID, pg_types.VARCHAROID, myudt_oid)

self.assertEqual(tuple(c.column_names), ('my_column1','my_column2', 'my_column3'))
self.assertEqual(tuple(c.sql_column_types), ('pg_catalog.text', 'CHARACTER VARYING', '"public"."myudt"'))
self.assertEqual(tuple(c.sql_column_types), ('pg_catalog.text', 'CHARACTER VARYING', typpath))
self.assertEqual(tuple(c.pg_column_types), (

@@ -825,2 +829,23 @@ pg_types.TEXTOID, pg_types.VARCHAROID, myudt_oid

@pg_tmp
def testTransactionAlias(self):
self.assertEqual(db.transaction, db.xact)
try:
with db.transaction():
db.execute("CREATE TABLE t (i int);")
raise Exception('some failure')
except:
pass
else:
self.fail("expected exception was not raised")
try:
db.query("select * from t")
except:
# No table.
pass
else:
self.fail("transaction abort had no effect")
def cursor_read(self):

@@ -827,0 +852,0 @@ ps = db.prepare("SELECT i FROM generate_series(0, (2^8)::int - 1) AS g(i)")

@@ -21,2 +21,3 @@ ##

'u:p@h:1',
'postgres://host/database',
'pq://user:password@host:port/database?setting=value#public,private',

@@ -88,2 +89,16 @@ 'pq://fæm.com:123/õéf/á?param=val',

class test_iri(unittest.TestCase):
def testAlternateSchemes(self):
field = pg_iri.parse("postgres://host")['host']
self.assertEqual(field, 'host')
field = pg_iri.parse("postgresql://host")['host']
self.assertEqual(field, 'host')
try:
pg_iri.parse("reject://host")
except ValueError:
pass
else:
self.fail("unacceptable IRI scheme not rejected")
def testIP6Hosts(self):

@@ -106,3 +121,5 @@ """

def testPresentPasswordObscure(self):
"password is present in IRI, and obscure it"
"""
Password is present in IRI, and obscure it.
"""
s = 'pq://user:pass@host:port/dbname'

@@ -115,3 +132,5 @@ o = 'pq://user:***@host:port/dbname'

def testPresentPasswordObscure(self):
"password is *not* present in IRI, and do nothing"
"""
Password is *not* present in IRI, and do nothing.
"""
s = 'pq://user@host:port/dbname'

@@ -118,0 +137,0 @@ o = 'pq://user@host:port/dbname'

@@ -13,2 +13,8 @@ ##

default_installation = test_connect.default_installation
has_ssl = False
if default_installation is not None:
has_ssl = default_installation.ssl
server_key = """

@@ -108,2 +114,5 @@ -----BEGIN RSA PRIVATE KEY-----

def configure_cluster(self):
if not has_ssl:
return
super().configure_cluster()

@@ -131,2 +140,5 @@ self.cluster.settings['ssl'] = 'on'

def initialize_database(self):
if not has_ssl:
return
super().initialize_database()

@@ -141,2 +153,4 @@ with self.cluster.connection(user = 'test') as db:

@unittest.skipIf(default_installation is None, "no installation provided by environment")
@unittest.skipIf(not has_ssl, "could not detect installation tls")
def test_ssl_mode_require(self):

@@ -174,2 +188,4 @@ host, port = self.cluster.address()

@unittest.skipIf(default_installation is None, "no installation provided by environment")
@unittest.skipIf(not has_ssl, "could not detect installation tls")
def test_ssl_mode_disable(self):

@@ -208,2 +224,4 @@ host, port = self.cluster.address()

@unittest.skipIf(default_installation is None, "no installation provided by environment")
@unittest.skipIf(not has_ssl, "could not detect installation tls")
def test_ssl_mode_prefer(self):

@@ -242,2 +260,4 @@ host, port = self.cluster.address()

@unittest.skipIf(default_installation is None, "no installation provided by environment")
@unittest.skipIf(not has_ssl, "could not detect installation tls")
def test_ssl_mode_allow(self):

@@ -244,0 +264,0 @@ host, port = self.cluster.address()

@@ -20,9 +20,5 @@ ##

# These two require custom cluster configurations.
# Expects PGINSTALLATION to be set. Tests may be skipped.
from .test_connect import *
# No SSL? cluster initialization will fail.
if default().ssl:
from .test_ssl_connect import *
else:
stderr.write("NOTICE: installation doesn't support SSL\n")
from .test_ssl_connect import *

@@ -29,0 +25,0 @@ try:

@@ -289,5 +289,5 @@ ##

def from_elements(typ,
elements : "iterable of elements in the array",
lowerbounds : "beginning of each axis" = None,
upperbounds : "upper bounds; size of each axis" = None,
elements,
lowerbounds = None,
upperbounds = None,
len = len,

@@ -294,0 +294,0 @@ ):

@@ -98,5 +98,2 @@ import math

def intersect(self, ob):
raise NotImplementedError
def perpendicular(self, ob):

@@ -121,3 +118,3 @@ return (self.slope / type(self)(ob).slope) == -1.0

::
(-2, 0) `high`

@@ -175,3 +172,3 @@ |

"""
type for PostgreSQL circles
Type for PostgreSQL circles.
"""

@@ -178,0 +175,0 @@ __slots__ = ()

@@ -5,14 +5,12 @@ ##

"""
PostgreSQL version parsing.
PostgreSQL version string parsing.
>>> postgresql.version.split('8.0.1')
>>> postgresql.versionstring.split('8.0.1')
(8, 0, 1, None, None)
"""
def split(vstr : str) -> (
'major','minor','patch',...,'state_class','state_level'
):
def split(vstr: str) -> tuple:
"""
Split a PostgreSQL version string into a tuple
(major,minor,patch,...,state_class,state_level)
Split a PostgreSQL version string into a tuple.
(major, minor, patch, ..., state_class, state_level)
"""

@@ -42,20 +40,18 @@ v = vstr.strip().split('.')

def unsplit(vtup: tuple) -> str:
	"""
	Join a version tuple produced by `split` back into the original version string.

	The numeric components (major, minor, patch, ...) are joined with dots;
	`None` components are omitted. A trailing state suffix (e.g. 'b2') is
	appended only when ``state_class`` is not None.
	"""
	# Dotted prefix: stringify every non-None numeric component.
	svtup = [str(x) for x in vtup[:-2] if x is not None]
	state_class, state_level = vtup[-2:]
	# Append the state suffix (class + level) only when a state class exists.
	return '.'.join(svtup) + ('' if state_class is None else state_class + str(state_level))
def normalize(split_version: tuple) -> tuple:
	"""
	Given a tuple produced by `split`, normalize the `None` objects into int(0),
	or 'final' when it's the ``state_class``.
	"""
	(*head, state_class, state_level) = split_version
	# Missing numeric components default to zero.
	mmp = [x if x is not None else 0 for x in head]
	# A missing state class means a final release; a missing level means 0.
	return tuple(mmp + [state_class or 'final', state_level or 0])

@@ -62,0 +58,0 @@ default_state_class_priority = [

Metadata-Version: 2.1
Name: py-postgresql
Version: 1.2.2
Version: 1.3.0
Summary: PostgreSQL driver and tools library.

@@ -10,51 +10,2 @@ Home-page: http://github.com/python-postgres/fe

Maintainer-email: james.pye@gmail.com
License: UNKNOWN
Description:
.. warning::
In v1.3, `postgresql.driver.dbapi20.connect` will now raise `ClientCannotConnectError` directly.
Exception traps around connect should still function, but the `__context__` attribute
on the error instance will be `None` in the usual failure case as it is no longer
incorrectly chained. Trapping `ClientCannotConnectError` ahead of `Error` should
allow both cases to co-exist in the event that data is being extracted from
the `ClientCannotConnectError`.
py-postgresql is a set of Python modules providing interfaces to various parts
of PostgreSQL. Primarily, it provides a pure-Python driver with some C optimizations for
querying a PostgreSQL database.
http://github.com/python-postgres/fe
Features:
* Prepared Statement driven interfaces.
* Cluster tools for creating and controlling a cluster.
* Support for most PostgreSQL types: composites, arrays, numeric, lots more.
* COPY support.
Sample PG-API Code::
>>> import postgresql
>>> db = postgresql.open('pq://user:password@host:port/database')
>>> db.execute("CREATE TABLE emp (emp_first_name text, emp_last_name text, emp_salary numeric)")
>>> make_emp = db.prepare("INSERT INTO emp VALUES ($1, $2, $3)")
>>> make_emp("John", "Doe", "75,322")
>>> with db.xact():
... make_emp("Jane", "Doe", "75,322")
... make_emp("Edward", "Johnson", "82,744")
...
There is a DB-API 2.0 module as well::
postgresql.driver.dbapi20
However, PG-API is recommended as it provides greater utility.
Once installed, try out the ``pg_python`` console script::
$ python3 -m postgresql.bin.pg_python -h localhost -p port -U theuser -d database_name
If a successful connection is made to the remote host, it will provide a Python
console with the database connection bound to the `db` name.
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable

@@ -71,2 +22,52 @@ Classifier: Intended Audience :: Developers

Classifier: Topic :: Database
Requires-Python: >=3.8
Description-Content-Type: text/x-rst
License-File: LICENSE
License-File: AUTHORS
.. warning::
In v1.3, `postgresql.driver.dbapi20.connect` will now raise `ClientCannotConnectError` directly.
Exception traps around connect should still function, but the `__context__` attribute
on the error instance will be `None` in the usual failure case as it is no longer
incorrectly chained. Trapping `ClientCannotConnectError` ahead of `Error` should
allow both cases to co-exist in the event that data is being extracted from
the `ClientCannotConnectError`.
py-postgresql is a set of Python modules providing interfaces to various parts
of PostgreSQL. Primarily, it provides a pure-Python driver with some C optimizations for
querying a PostgreSQL database.
http://github.com/python-postgres/fe
Features:
* Prepared Statement driven interfaces.
* Cluster tools for creating and controlling a cluster.
* Support for most PostgreSQL types: composites, arrays, numeric, lots more.
* COPY support.
Sample PG-API Code::
>>> import postgresql
>>> db = postgresql.open('pq://user:password@host:port/database')
>>> db.execute("CREATE TABLE emp (emp_first_name text, emp_last_name text, emp_salary numeric)")
>>> make_emp = db.prepare("INSERT INTO emp VALUES ($1, $2, $3)")
>>> make_emp("John", "Doe", "75,322")
>>> with db.xact():
... make_emp("Jane", "Doe", "75,322")
... make_emp("Edward", "Johnson", "82,744")
...
There is a DB-API 2.0 module as well::
postgresql.driver.dbapi20
However, PG-API is recommended as it provides greater utility.
Once installed, try out the ``pg_python`` console script::
$ python3 -m postgresql.bin.pg_python -h localhost -p port -U theuser -d database_name
If a successful connection is made to the remote host, it will provide a Python
console with the database connection bound to the `db` name.
AUTHORS
LICENSE
MANIFEST.in
README
README.md
setup.cfg
setup.py

@@ -15,2 +16,3 @@ postgresql/__init__.py

postgresql/installation.py
postgresql/iri-bu.py
postgresql/iri.py

@@ -35,2 +37,3 @@ postgresql/message.py

postgresql/documentation/sphinx/changes-v1.2.rst
postgresql/documentation/sphinx/changes-v1.3.rst
postgresql/documentation/sphinx/clientparameters.rst

@@ -37,0 +40,0 @@ postgresql/documentation/sphinx/cluster.rst

About
=====
py-postgresql is a Python 3 package providing modules for working with PostgreSQL.
This includes a high-level driver, and many other tools that support a developer
working with PostgreSQL databases.
For a high-performance async interface, consider MagicStack's asyncpg
(http://github.com/MagicStack/asyncpg).
py-postgresql, currently, does not have direct support for high-level async
interfaces provided by recent versions of Python. Future versions may change this.
Errata
------
.. warning::
In v1.3, `postgresql.driver.dbapi20.connect` will now raise `ClientCannotConnectError` directly.
Exception traps around connect should still function, but the `__context__` attribute
on the error instance will be `None` in the usual failure case as it is no longer
incorrectly chained. Trapping `ClientCannotConnectError` ahead of `Error` should
allow both cases to co-exist in the event that data is being extracted from
the `ClientCannotConnectError`.
Installation
------------
Installation *should* be as simple as::
$ python3 ./setup.py install
More information about installation is available via::
python -m postgresql.documentation.admin
Basic Driver Usage
------------------
Using PG-API::
>>> import postgresql
>>> db = postgresql.open('pq://user:password@host:port/database')
>>> get_table = db.prepare("select * from information_schema.tables where table_name = $1")
>>> for x in get_table("tables"):
...     print(x)
>>> print(get_table.first("tables"))
However, a DB-API 2.0 driver is provided as well: `postgresql.driver.dbapi20`.
Further Information
-------------------
Online documentation can be retrieved from:
http://py-postgresql.readthedocs.io
Or, you can read them in your pager: python -m postgresql.documentation.index
For information about PostgreSQL:
http://postgresql.org
For information about Python:
http://python.org

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display