🚨 Active Supply Chain Attack: node-ipc Package Compromised. Learn More
Socket
Book a DemoSign in
Socket

processing

Package Overview
Dependencies
Maintainers
2
Versions
18
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

processing - pypi Package Compare versions

Comparing version
0.40
to
0.50
examples/__init__.py
+235
#
# Simple benchmarks for the processing package
#
import time, sys, processing, threading, Queue, gc
# Pick the highest-resolution wall-clock timer for this platform:
# on Windows time.clock() has much finer resolution than time.time().
if sys.platform == 'win32':
    _timer = time.clock
else:
    _timer = time.time

# Minimum duration (seconds) of a benchmark run; iteration counts are
# doubled until a run takes at least this long.
delta = 1
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
    """Producer: signal start via condition `c`, push `iterations`
    256-byte strings onto queue `q`, then a final 'STOP' sentinel."""
    payload = '0' * 256
    # let the parent know we are running before the timed phase begins
    c.acquire()
    c.notify()
    c.release()
    count = 0
    while count < iterations:
        q.put(payload)
        count += 1
    q.put('STOP')
def test_queuespeed(Process, q, c):
    """Measure throughput of queue `q`, doubling the workload until a
    run takes at least `delta` seconds."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        p = Process(target=queuespeed_func, args=(q, c, iterations))
        c.acquire()
        p.start()
        # wait until the producer signals it has started, so startup
        # cost is excluded from the timing
        c.wait()
        c.release()
        result = None
        t = _timer()
        while result != 'STOP':
            result = q.get()
        elapsed = _timer() - t
        p.join()
    print iterations, 'objects passed through the queue in', elapsed, 'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_PIPESPEED
def pipe_func(c, cond, iterations):
    """Producer: signal start via `cond`, send `iterations` 256-byte
    strings down connection `c`, then a 'STOP' sentinel."""
    payload = '0' * 256
    cond.acquire()
    cond.notify()
    cond.release()
    sent = 0
    while sent < iterations:
        c.send(payload)
        sent += 1
    c.send('STOP')
def test_pipespeed():
    """Measure throughput of a processing.Pipe connection."""
    c, d = processing.Pipe()
    cond = processing.Condition()
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        p = processing.Process(target=pipe_func, args=(d, cond, iterations))
        cond.acquire()
        p.start()
        # wait for the producer to start before timing
        cond.wait()
        cond.release()
        result = None
        t = _timer()
        while result != 'STOP':
            result = c.recv()
        elapsed = _timer() - t
        p.join()
    print iterations, 'objects passed through connection in',elapsed,'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_SEQSPEED
def test_seqspeed(seq):
    """Measure indexed-read speed of `seq` (plain list, managed list,
    or shared Array)."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        t = _timer()
        for i in xrange(iterations):
            # a fixed index keeps the per-access cost constant
            a = seq[5]
        elapsed = _timer()-t
    print iterations, 'iterations in', elapsed, 'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_LOCK
def test_lockspeed(l):
    """Measure uncontended acquire/release speed of lock `l`."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        t = _timer()
        for i in xrange(iterations):
            l.acquire()
            l.release()
        elapsed = _timer()-t
    print iterations, 'iterations in', elapsed, 'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_CONDITION
def conditionspeed_func(c, N):
    """Child half of the condition ping-pong: answer `N` notifications."""
    c.acquire()
    # the initial notify tells the parent we are ready
    c.notify()
    remaining = N
    while remaining:
        c.wait()
        c.notify()
        remaining -= 1
    c.release()
def test_conditionspeed(Process, c):
    """Measure notify/wait round trips ping-ponging on condition `c`."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        c.acquire()
        p = Process(target=conditionspeed_func, args=(c, iterations))
        p.start()
        # wait for the child's initial notify before timing
        c.wait()
        t = _timer()
        for i in xrange(iterations):
            c.notify()
            c.wait()
        elapsed = _timer()-t
        c.release()
        p.join()
    print iterations * 2, 'waits in', elapsed, 'seconds'
    print 'average number/sec:', iterations * 2 / elapsed
####
def test():
    """Benchmark queues, pipes, sequences, locks and conditions for
    threads, processes and manager proxies."""
    manager = processing.Manager()
    # disable GC so collection pauses do not distort the timings
    gc.disable()
    print '\n\t######## testing Queue.Queue\n'
    test_queuespeed(threading.Thread, Queue.Queue(),
                    threading.Condition())
    print '\n\t######## testing processing.Queue\n'
    test_queuespeed(processing.Process, processing.Queue(),
                    processing.Condition())
    print '\n\t######## testing Queue managed by server process\n'
    test_queuespeed(processing.Process, manager.Queue(),
                    manager.Condition())
    print '\n\t######## testing processing.Pipe\n'
    test_pipespeed()
    print
    print '\n\t######## testing list\n'
    test_seqspeed(range(10))
    print '\n\t######## testing list managed by server process\n'
    test_seqspeed(manager.list(range(10)))
    print '\n\t######## testing Array("i", ..., lock=False)\n'
    test_seqspeed(processing.Array('i', range(10), lock=False))
    print '\n\t######## testing Array("i", ..., lock=True)\n'
    test_seqspeed(processing.Array('i', range(10), lock=True))
    print
    print '\n\t######## testing threading.Lock\n'
    test_lockspeed(threading.Lock())
    print '\n\t######## testing threading.RLock\n'
    test_lockspeed(threading.RLock())
    print '\n\t######## testing processing.Lock\n'
    test_lockspeed(processing.Lock())
    print '\n\t######## testing processing.RLock\n'
    test_lockspeed(processing.RLock())
    print '\n\t######## testing lock managed by server process\n'
    test_lockspeed(manager.Lock())
    print '\n\t######## testing rlock managed by server process\n'
    test_lockspeed(manager.RLock())
    print
    print '\n\t######## testing threading.Condition\n'
    test_conditionspeed(threading.Thread, threading.Condition())
    print '\n\t######## testing processing.Condition\n'
    test_conditionspeed(processing.Process, processing.Condition())
    print '\n\t######## testing condition managed by a server process\n'
    test_conditionspeed(processing.Process, manager.Condition())
    gc.enable()
if __name__ == '__main__':
    # freezeSupport() lets frozen Windows executables respawn safely
    processing.freezeSupport()
    test()
#
# This module shows how to use arbitrary callables with a subclass of
# `BaseManager`.
#
from processing import freezeSupport
from processing.managers import BaseManager, BaseProxy, CreatorMethod
##
class Foo(object):
    """Example class registered with MyManager.

    Methods whose names start with '_' are not exposed via a proxy
    unless listed explicitly in `exposed` (see MyManager.Foo2).
    """
    def f(self):
        print 'you called Foo.f()'
    def g(self):
        print 'you called Foo.g()'
    def _h(self):
        # "private" by naming convention -- only proxied when exposed
        print 'you called Foo._h()'
# A simple generator function
def baz():
    """Yield the squares 0, 1, 4, ..., 81."""
    n = 0
    while n < 10:
        yield n * n
        n += 1
# Proxy type for generator objects
class GeneratorProxy(BaseProxy):
    """Proxy that forwards the (Python 2) iterator protocol to the
    generator object living in the manager process."""
    def __iter__(self):
        return self
    def next(self):
        # delegate to the referent's next() in the server process
        return self._callmethod('next')
##
class MyManager(BaseManager):
    """Manager exposing Foo instances and the baz generator."""
    # register the Foo class; make all public methods accessible via proxy
    Foo1 = CreatorMethod(Foo)
    # register the Foo class; make only `g()` and `_h()` accessible via proxy
    Foo2 = CreatorMethod(Foo, exposed=('g', '_h'))
    # register the generator function baz; use `GeneratorProxy` to make proxies
    baz = CreatorMethod(baz, proxytype=GeneratorProxy)
##
def test():
    """Exercise the proxies created by MyManager."""
    manager = MyManager()
    manager.start()
    print '-' * 20
    f1 = manager.Foo1()
    f1.f()
    f1.g()
    # only public methods were exposed for Foo1
    assert not hasattr(f1, '_h')
    print '-' * 20
    f2 = manager.Foo2()
    f2.g()
    f2._h()
    # Foo2 exposed only g() and _h()
    assert not hasattr(f2, 'f')
    print '-' * 20
    # iterate the remote generator through GeneratorProxy
    it = manager.baz()
    for i in it:
        print '<%d>' % i,
    print
##
if __name__ == '__main__':
    # needed so frozen Windows executables can spawn children safely
    freezeSupport()
    test()
#
# A test of `processing.Pool` class
#
from processing import Pool, TimeoutError
from processing import cpuCount, currentProcess, freezeSupport, activeChildren
import time, random, sys
#
# Functions used by test code
#
def calculate(func, args):
    """Apply `func` to `args` and return a report line naming the
    worker process that did the work."""
    outcome = func(*args)
    worker_name = currentProcess().getName()
    return '%s says that %s%s = %s' % \
           (worker_name, func.__name__, args, outcome)
def calculatestar(args):
    # unpack a (func, args) task tuple -- needed because map()/imap()
    # pass each task as a single argument
    return calculate(*args)
def mul(a, b):
    """Return a * b after a random delay of at most 0.5s (fake work)."""
    delay = 0.5 * random.random()
    time.sleep(delay)
    return a * b
def plus(a, b):
    """Return a + b after a random delay of at most 0.5s (fake work)."""
    delay = 0.5 * random.random()
    time.sleep(delay)
    return a + b
def f(x):
    """Return 1/(x - 5); raises ZeroDivisionError at x == 5, which the
    error-handling tests rely on."""
    denominator = x - 5.0
    return 1.0 / denominator
def pow3(x):
    """Return x cubed."""
    result = x ** 3
    return result
def noop(x):
    """Accept and ignore x; always returns None (benchmark baseline)."""
    return None
#
# Test code
#
def test():
    """Exercise processing.Pool: the map variants, error propagation,
    timeouts, callbacks, close(), terminate() and garbage collection."""
    print 'cpuCount() = %d\n' % cpuCount()
    #
    # Create pool
    #
    PROCESSES = 4
    print 'Creating pool with %d processes\n' % PROCESSES
    pool = Pool(PROCESSES)
    #
    # Tests
    #
    TASKS = [(mul, (i, 7)) for i in range(10)] + \
            [(plus, (i, 8)) for i in range(10)]
    results = [pool.apply_async(calculate, t) for t in TASKS]
    imap_it = pool.imap(calculatestar, TASKS)
    imap_unordered_it = pool.imap_unordered(calculatestar, TASKS)
    print 'Ordered results using pool.apply_async():'
    for r in results:
        print '\t', r.get()
    print
    print 'Ordered results using pool.imap():'
    for x in imap_it:
        print '\t', x
    print
    print 'Unordered results using pool.imap_unordered():'
    for x in imap_unordered_it:
        print '\t', x
    print
    print 'Ordered results using pool.map() --- will block till complete:'
    for x in pool.map(calculatestar, TASKS):
        print '\t', x
    print
    #
    # Simple benchmarks: builtin map vs pool.map vs pool.imap
    #
    N = 100000
    print 'def pow3(x): return x**3'
    t = time.time()
    A = map(pow3, xrange(N))
    print '\tmap(pow3, xrange(%d)):\n\t\t%s seconds' % \
          (N, time.time() - t)
    t = time.time()
    B = pool.map(pow3, xrange(N))
    print '\tpool.map(pow3, xrange(%d)):\n\t\t%s seconds' % \
          (N, time.time() - t)
    t = time.time()
    C = list(pool.imap(pow3, xrange(N), chunksize=N//8))
    print '\tlist(pool.imap(pow3, xrange(%d), chunksize=%d)):\n\t\t%s' \
          ' seconds' % (N, N//8, time.time() - t)
    assert A == B == C, (len(A), len(B), len(C))
    print
    L = [None] * 1000000
    print 'def noop(x): pass'
    print 'L = [None] * 1000000'
    t = time.time()
    A = map(noop, L)
    print '\tmap(noop, L):\n\t\t%s seconds' % \
          (time.time() - t)
    t = time.time()
    B = pool.map(noop, L)
    print '\tpool.map(noop, L):\n\t\t%s seconds' % \
          (time.time() - t)
    t = time.time()
    C = list(pool.imap(noop, L, chunksize=len(L)//8))
    print '\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \
          (len(L)//8, time.time() - t)
    assert A == B == C, (len(A), len(B), len(C))
    print
    del A, B, C, L
    #
    # Test error handling: exceptions raised in workers must propagate
    #
    print 'Testing error handling:'
    try:
        print pool.apply(f, (5,))
    except ZeroDivisionError:
        print '\tGot ZeroDivisionError as expected from pool.apply()'
    else:
        raise AssertionError, 'expected ZeroDivisionError'
    try:
        print pool.map(f, range(10))
    except ZeroDivisionError:
        print '\tGot ZeroDivisionError as expected from pool.map()'
    else:
        raise AssertionError, 'expected ZeroDivisionError'
    try:
        print list(pool.imap(f, range(10)))
    except ZeroDivisionError:
        print '\tGot ZeroDivisionError as expected from list(pool.imap())'
    else:
        raise AssertionError, 'expected ZeroDivisionError'
    # f raises only for item 5; the other items must come through
    it = pool.imap(f, range(10))
    for i in range(10):
        try:
            x = it.next()
        except ZeroDivisionError:
            if i == 5:
                pass
        except StopIteration:
            break
        else:
            if i == 5:
                raise AssertionError, 'expected ZeroDivisionError'
    assert i == 9
    print '\tGot ZeroDivisionError as expected from IMapIterator.next()'
    print
    #
    # Testing timeouts
    #
    print 'Testing ApplyResult.get() with timeout:',
    res = pool.apply_async(calculate, TASKS[0])
    while 1:
        sys.stdout.flush()
        try:
            sys.stdout.write('\n\t%s' % res.get(0.02))
            break
        except TimeoutError:
            # print a dot for each 20ms timeout until the result is in
            sys.stdout.write('.')
    print
    print
    print 'Testing IMapIterator.next() with timeout:',
    it = pool.imap(calculatestar, TASKS)
    while 1:
        sys.stdout.flush()
        try:
            sys.stdout.write('\n\t%s' % it.next(0.02))
        except StopIteration:
            break
        except TimeoutError:
            sys.stdout.write('.')
    print
    print
    #
    # Testing callback
    #
    print 'Testing callback:'
    A = []
    B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]
    r = pool.apply_async(mul, (7, 8), callback=A.append)
    r.wait()
    r = pool.map_async(pow3, range(10), callback=A.extend)
    r.wait()
    if A == B:
        print '\tcallbacks succeeded\n'
    else:
        print '\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)
    #
    # Check there are no outstanding tasks
    #
    assert not pool._cache, 'cache = %r' % pool._cache
    #
    # Check close() methods
    #
    print 'Testing close():'
    for worker in pool._pool:
        assert worker.isAlive()
    result = pool.apply_async(time.sleep, [0.5])
    pool.close()
    pool.join()
    # close() lets pending tasks finish before workers exit
    assert result.get() is None
    for worker in pool._pool:
        assert not worker.isAlive()
    print '\tclose() succeeded\n'
    #
    # Check terminate() method
    #
    print 'Testing terminate():'
    pool = Pool(2)
    ignore = pool.apply(pow3, [2])
    results = [pool.apply_async(time.sleep, [10]) for i in range(10)]
    # terminate() must kill workers without waiting for the sleeps
    pool.terminate()
    pool.join()
    for worker in pool._pool:
        assert not worker.isAlive()
    print '\tterminate() succeeded\n'
    #
    # Check garbage collection: dropping the last reference to the pool
    # should terminate its workers
    #
    print 'Testing garbage collection:'
    pool = Pool(2)
    processes = pool._pool
    ignore = pool.apply(pow3, [2])
    results = [pool.apply_async(time.sleep, [10]) for i in range(10)]
    del results, pool
    time.sleep(0.2)
    for worker in processes:
        assert not worker.isAlive()
    print '\tgarbage collection succeeded\n'
if __name__ == '__main__':
    # needed so frozen Windows executables can spawn children safely
    freezeSupport()
    test()
#
# A test file for the `processing` package
#
import time, sys, random
from Queue import Empty
import processing # may get overwritten
#### TEST_VALUE
def value_func(running, mutex):
    """Sleep a random time, then decrement shared counter `running`
    under `mutex`."""
    # reseed so forked children do not share the parent's PRNG state
    random.seed()
    time.sleep(random.random()*4)
    mutex.acquire()
    print '\n\t\t\t' + str(processing.currentProcess()) + ' has finished'
    running.value -= 1
    mutex.release()
def test_value():
    """Start 10 processes that each decrement a shared counter, and
    poll the counter until it reaches zero."""
    TASKS = 10
    running = processing.Value('i', TASKS)
    mutex = processing.Lock()
    for i in range(TASKS):
        processing.Process(target=value_func, args=(running, mutex)).start()
    while running.value > 0:
        time.sleep(0.08)
        # take the mutex so we do not print while a child is updating
        mutex.acquire()
        print running.value,
        sys.stdout.flush()
        mutex.release()
    print
    print 'No more running processes'
#### TEST_QUEUE
def queue_func(queue):
    """Producer: put 30 squares on `queue` at random intervals, then a
    final 'STOP' sentinel."""
    for value in range(30):
        time.sleep(0.5 * random.random())
        queue.put(value * value)
    queue.put('STOP')
def test_queue():
    """Consume items from a child producer, printing TIMEOUT whenever
    get() waits more than 0.3 seconds."""
    q = processing.Queue()
    p = processing.Process(target=queue_func, args=(q,))
    p.start()
    o = None
    while o != 'STOP':
        try:
            o = q.get(timeout=0.3)
            print o,
            sys.stdout.flush()
        except Empty:
            # producer is sleeping -- show that get() timed out
            print 'TIMEOUT'
    print
#### TEST_CONDITION
def condition_func(cond):
    """Child: take the condition, sleep, then notify the waiting parent."""
    cond.acquire()
    print '\t' + str(cond)
    time.sleep(2)
    print '\tchild is notifying'
    print '\t' + str(cond)
    cond.notify()
    cond.release()
def test_condition():
    """Demonstrate wait/notify between parent and child, printing the
    condition's state at each step.

    NOTE(review): the parent acquires the condition twice -- presumably
    processing.Condition is built on a recursive lock, so this is legal
    and wait() releases it fully; confirm against the implementation.
    """
    cond = processing.Condition()
    p = processing.Process(target=condition_func, args=(cond,))
    print cond
    cond.acquire()
    print cond
    cond.acquire()
    print cond
    p.start()
    print 'main is waiting'
    cond.wait()
    print 'main has woken up'
    print cond
    cond.release()
    print cond
    cond.release()
    p.join()
    print cond
#### TEST_SEMAPHORE
def semaphore_func(sema, mutex, running):
    """Worker gated by `sema` (max 3 concurrent); `running` counts the
    currently active workers and is guarded by `mutex`."""
    sema.acquire()
    mutex.acquire()
    running.value += 1
    print running.value, 'tasks are running'
    mutex.release()
    # reseed so forked children do not share the parent's PRNG state
    random.seed()
    time.sleep(random.random()*2)
    mutex.acquire()
    running.value -= 1
    print '%s has finished' % processing.currentProcess()
    mutex.release()
    sema.release()
def test_semaphore():
    """Run 10 workers with at most 3 active at once (bounded by `sema`)."""
    sema = processing.Semaphore(3)
    mutex = processing.RLock()
    running = processing.Value('i', 0)
    workers = [
        processing.Process(target=semaphore_func, args=(sema, mutex, running))
        for _ in range(10)
    ]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
#### TEST_JOIN_TIMEOUT
def join_timeout_func():
    """Child that outlives several 1-second join() timeouts."""
    print '\tchild sleeping'
    time.sleep(5.5)
    print '\n\tchild terminating'
def test_join_timeout():
    """Repeatedly join() with a timeout until the child really exits."""
    p = processing.Process(target=join_timeout_func)
    p.start()
    print 'waiting for process to finish'
    while 1:
        p.join(timeout=1)
        # join() with a timeout returns even if the child is alive
        if not p.isAlive():
            break
        print '.',
        sys.stdout.flush()
#### TEST_EVENT
def event_func(event):
    """Child: block on `event` until the parent sets it."""
    print '\t%r is waiting' % processing.currentProcess()
    event.wait()
    print '\t%r has woken up' % processing.currentProcess()
def test_event():
    """Start 5 waiters, then wake them all with one event.set()."""
    event = processing.Event()
    processes = [processing.Process(target=event_func, args=(event,))
                 for i in range(5)]
    for p in processes:
        p.start()
    print 'main is sleeping'
    time.sleep(2)
    print 'main is setting event'
    event.set()
    for p in processes:
        p.join()
#### TEST_SHAREDVALUES
def sharedvalues_func(values, arrays, shared_values, shared_arrays):
    """Child: verify each shared Value/Array matches the plain data it
    was created from."""
    for i in range(len(values)):
        v = values[i][1]
        sv = shared_values[i].value
        assert v == sv
    for i in range(len(values)):
        # NOTE(review): iterates len(values), which happens to equal
        # len(arrays) (both 3); len(arrays) would be the clearer bound
        a = arrays[i][1]
        sa = list(shared_arrays[i][:])
        assert a == sa
    print 'Tests passed'
def test_sharedvalues():
    """Create shared Values/Arrays and verify their contents from a
    child process."""
    values = [
        ('i', 10),
        ('h', -2),
        ('d', 1.25)
    ]
    arrays = [
        ('i', range(100)),
        ('d', [0.25 * i for i in range(100)]),
        ('H', range(1000))
    ]
    shared_values = [processing.Value(code, val) for code, val in values]
    shared_arrays = [processing.Array(code, seq) for code, seq in arrays]
    child = processing.Process(
        target=sharedvalues_func,
        args=(values, arrays, shared_values, shared_arrays)
    )
    child.start()
    child.join()
    # exit code 0 means every assert in the child passed
    assert child.getExitCode() == 0
####
def test(namespace=processing):
    """Run every TEST_* function against `namespace` (the processing
    module itself, a manager, or processing.dummy for threads)."""
    global processing
    # rebind the module global so the test functions use `namespace`
    processing = namespace
    for func in [ test_value, test_queue, test_condition,
                  test_semaphore, test_join_timeout, test_event,
                  test_sharedvalues ]:
        print '\n\t######## %s\n' % func.__name__
        func()
    ignore = processing.activeChildren() # cleanup any old processes
    if hasattr(processing, '_debug_info'):
        info = processing._debug_info()
        if info:
            print info
            raise ValueError, 'there should be no positive refcounts left'
if __name__ == '__main__':
    processing.freezeSupport()
    assert len(sys.argv) in (1, 2)
    # choose which namespace's Process/Queue/... objects get tested
    if len(sys.argv) == 1 or sys.argv[1] == 'processes':
        print ' Using processes '.center(79, '-')
        namespace = processing
    elif sys.argv[1] == 'manager':
        print ' Using processes and a manager '.center(79, '-')
        namespace = processing.Manager()
        # the manager lacks these, so borrow them from processing itself
        namespace.Process = processing.Process
        namespace.currentProcess = processing.currentProcess
        namespace.activeChildren = processing.activeChildren
    elif sys.argv[1] == 'threads':
        print ' Using threads '.center(79, '-')
        import processing.dummy as namespace
    else:
        print 'Usage:\n\t%s [processes | manager | threads]' % sys.argv[0]
        raise SystemExit, 2
    test(namespace)
#
# Example where a pool of http servers share a single listening socket
#
# On Windows this module depends on the ability to pickle a socket
# object so that the worker processes can inherit a copy of the server
# object. (We import `processing.reduction` to enable this pickling.)
#
# Not sure if we should synchronize access to `socket.accept()` method by
# using a process-shared lock -- does not seem to be necessary.
#
import os
import sys
from processing import Process, currentProcess, freezeSupport
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
if sys.platform == 'win32':
import processing.reduction # make sockets pickable/inheritable
def note(format, *args):
    """Write a stderr line tagged with the current process name."""
    message = format % args
    sys.stderr.write('[%s]\t%s\n' % (currentProcess().getName(), message))
class RequestHandler(SimpleHTTPRequestHandler):
    """SimpleHTTPRequestHandler whose log lines identify the worker."""
    # we override log_message() to show which process is handling the request
    def log_message(self, format, *args):
        note(format, *args)
def serve_forever(server):
    """Announce startup, then run `server` until interrupted.

    KeyboardInterrupt is swallowed so Ctrl-C in the parent terminates
    the workers without a traceback.
    """
    note('starting server')
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
def runpool(address, number_of_processes):
    """Serve HTTP on `address` with `number_of_processes` workers that
    all accept on one shared listening socket."""
    # create a single bound server -- each child inherits a copy of it
    server = HTTPServer(address, RequestHandler)
    for _ in range(number_of_processes - 1):
        Process(target=serve_forever, args=(server,)).start()
    # the parent doubles as the final worker
    serve_forever(server)
def test():
    """Serve the package's parent directory over HTTP with 4 workers."""
    DIR = os.path.join(os.path.dirname(__file__), '..')
    ADDRESS = ('localhost', 8000)
    NUMBER_OF_PROCESSES = 4
    print 'Serving at http://%s:%d using %d worker processes' % \
          (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)
    print 'To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']
    os.chdir(DIR)
    runpool(ADDRESS, NUMBER_OF_PROCESSES)
if __name__ == '__main__':
    # needed so frozen Windows executables can spawn children safely
    freezeSupport()
    test()
#
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same in the same order as the corresponding tasks were
# put on the input queue. If it is important to get the results back
# in the original order then consider using `Pool.map()` or
# `Pool.imap()` (which will save on the amount of code needed anyway).
#
import time
import random
from processing import currentProcess, Process, freezeSupport
from processing import Queue
#
# Function run by worker processes
#
def worker(input, output):
    """Pull (func, args) tasks from `input` until the 'STOP' sentinel,
    putting each formatted result on `output`."""
    for task in iter(input.get, 'STOP'):
        func, args = task
        output.put(calculate(func, args))
#
# Function used to calculate result
#
def calculate(func, args):
    """Run func(*args) and format the result, tagged with the name of
    the worker process that computed it."""
    value = func(*args)
    return '%s says that %s%s = %s' % (
        currentProcess().getName(), func.__name__, args, value)
#
# Functions referenced by tasks
#
def mul(a, b):
    """Return a * b after sleeping up to half a second (fake work)."""
    pause = 0.5 * random.random()
    time.sleep(pause)
    return a * b
def plus(a, b):
    """Return a + b after sleeping up to half a second (fake work)."""
    pause = 0.5 * random.random()
    time.sleep(pause)
    return a + b
#
#
#
def test():
    """Feed tasks to a pool of worker processes through explicit
    queues and print the (unordered) results."""
    NUMBER_OF_PROCESSES = 4
    TASKS1 = [(mul, (i, 7)) for i in range(20)]
    TASKS2 = [(plus, (i, 8)) for i in range(10)]
    # Create queues
    task_queue = Queue()
    done_queue = Queue()
    # Submit tasks
    task_queue.putmany(TASKS1)
    # Start worker processes
    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()
    # Get and print results
    print 'Unordered results:'
    for i in range(len(TASKS1)):
        print '\t', done_queue.get()
    # Add more tasks using `put()` instead of `putmany()`
    for task in TASKS2:
        task_queue.put(task)
    # Get and print some more results
    for i in range(len(TASKS2)):
        print '\t', done_queue.get()
    # Tell child processes to stop -- one sentinel per worker
    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')
if __name__ == '__main__':
    # needed so frozen Windows executables can spawn children safely
    freezeSupport()
    test()
#
# Module for starting a process object using os.fork() or CreateProcess()
#
# processing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import os
import sys
import signal
__all__ = ['Popen', 'PicklableOnlyForInheritance', 'assert_spawning', 'exit']
#
# Base for classes which should not be pickled except to enable inheritance
#
class PicklableOnlyForInheritance(object):
    """Base for objects that may be pickled only while a child process
    is being spawned (i.e. for inheritance), never for general transfer."""
    def __getstate__(self):
        # raises unless we are currently spawning a child
        assert_spawning(self)
        return self._state
    def __setstate__(self, state):
        # subclasses supply _setstate() to rebuild from inherited state
        self._setstate(state)
#
# Unix
#
if sys.platform != 'win32':
import time
exit = os._exit
def assert_spawning(self):
    # On Unix children inherit objects via fork(), so pickling one of
    # these objects is always an error.
    raise RuntimeError, \
          ('%s objects should only be shared between '
           'processes through inheritance' % type(self).__name__)
class Popen(object):
    """Start and manage a child process created with os.fork()."""
    def __init__(self, process_obj):
        # flush now so buffered output is not duplicated into the child
        sys.stdout.flush()
        sys.stderr.flush()
        self.returncode = None
        self.pid = os.fork()
        if self.pid == 0:
            # child branch: reseed `random` so forked children do not
            # share the parent's PRNG state
            if 'random' in sys.modules:
                import random
                random.seed()
            code = process_obj._bootstrap()
            sys.stdout.flush()
            sys.stderr.flush()
            # _exit() skips atexit handlers and the parent's cleanup
            os._exit(code)
    def wait(self):
        # flag 0 makes waitpid() block until the child exits
        return self.poll(0)
    def poll(self, flag=os.WNOHANG):
        if self.returncode is None:
            pid, sts = os.waitpid(self.pid, flag)
            # with WNOHANG, pid == 0 means the child is still running
            if pid == self.pid:
                if os.WIFSIGNALED(sts):
                    # killed by a signal: report negative signal number
                    self.returncode = -os.WTERMSIG(sts)
                else:
                    assert os.WIFEXITED(sts)
                    self.returncode = os.WEXITSTATUS(sts)
        return self.returncode
    def wait_timeout(self, timeout):
        # Poll for up to `timeout` seconds with exponential backoff;
        # returns the exit code, or None if still running.
        deadline = time.time() + timeout
        delay = 0.0005
        while 1:
            res = self.poll()
            if res is not None:
                break
            remaining = deadline - time.time()
            if remaining <= 0:
                break
            # backoff capped at 50ms and at the remaining time
            delay = min(delay * 2, remaining, 0.05)
            time.sleep(delay)
        return res
    def terminate(self):
        if self.returncode is None:
            try:
                os.kill(self.pid, signal.SIGTERM)
            except OSError:
                # NOTE(review): OSError is swallowed while returncode is
                # still None (e.g. the child exited between the check
                # and the kill); it re-raises only if the exit code has
                # already been collected.
                if self.returncode is not None:
                    raise
#
# Windows
#
else:
import imp, weakref, thread, msvcrt, _subprocess, processing
from os.path import dirname, splitext, basename, abspath
from cPickle import dump, load, HIGHEST_PROTOCOL
from processing._processing import win32
from processing.logger import subwarning
from processing.finalize import Finalize
exit = win32.ExitProcess
#
#
#
# Exit code used by Popen.terminate(); mapped back to -SIGTERM in
# Popen.wait_timeout().
TERMINATE = 0x10000
# True when running as a frozen Windows executable
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
# _spawning is True only while a child is being pickled/spawned;
# _state_lock serialises access to it (see assert_spawning and
# Popen.__init__).
_state_lock = thread.allocate_lock()
_spawning = False
def assert_spawning(self):
    # Pickling such objects is legal only during a spawn, i.e. while
    # Popen.__init__ has set _spawning under _state_lock.
    if not _spawning:
        raise RuntimeError, \
              ('%s objects should only be shared between '
               'processes through inheritance' % type(self).__name__)
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument, and which
# has terminate() and wait_timeout() methods.
#
class Popen(object):
    '''
    Start a subprocess to run the code of a process object
    '''
    def __init__(self, process_obj):
        global _spawning
        # create pipe for communication with child
        r, w = os.pipe()
        # get handle for read end of the pipe and make it inheritable
        rhandle = msvcrt.get_osfhandle(r)
        win32.SetHandleInformation(
            rhandle, win32.HANDLE_FLAG_INHERIT, win32.HANDLE_FLAG_INHERIT
        )
        # start the child, passing the pipe handle on its command line
        cmd = get_commandline() + [rhandle]
        cmd = ' '.join('"%s"' % x for x in cmd)
        hp, ht, pid, tid = _subprocess.CreateProcess(
            sys.executable, cmd, None, None, 1, 0, None, None, None
        )
        # parent no longer needs its read end, nor the handle of the
        # child's main thread
        os.close(r)
        ht.Close()
        # set attributes of self
        self.pid = pid
        self.returncode = None
        self._handle = hp
        # pickle preparation data and the process object into the pipe;
        # _spawning must be True while pickling so assert_spawning()
        # permits it
        prep_data = get_preparation_data(process_obj._name)
        to_child = os.fdopen(w, 'wb')
        _state_lock.acquire()
        try:
            _spawning = True
            dump(prep_data, to_child, HIGHEST_PROTOCOL)
            dump(process_obj, to_child, HIGHEST_PROTOCOL)
        finally:
            _spawning = False
            _state_lock.release()
        to_child.close()
    def wait_timeout(self, timeout):
        # Wait up to `timeout` seconds; returns exit code or None.
        if self.returncode is None:
            millisecs = int(timeout * 1000)
            res = _subprocess.WaitForSingleObject(self._handle, millisecs)
            if res == _subprocess.WAIT_OBJECT_0:
                code = _subprocess.GetExitCodeProcess(self._handle)
                if code == TERMINATE:
                    # map our magic terminate() exit code onto the
                    # Unix-style negative signal number
                    code = -signal.SIGTERM
                self.returncode = code
        return self.returncode
    def wait(self):
        # block until exit, polling in one-second slices
        res = None
        while res is None:
            res = self.wait_timeout(1)
        return res
    def poll(self):
        return self.wait_timeout(0)
    def terminate(self):
        if self.returncode is None:
            try:
                win32.TerminateProcess(int(self._handle), TERMINATE)
            except WindowsError:
                # ignore races where the child exited before the kill
                if self.returncode is not None:
                    raise
#
#
#
def is_forking(argv):
    """Return True if `argv` is a spawned-child command line.

    A forking invocation has exactly the form
    ``prog --processing-fork <pipe-handle>``.
    """
    forking = len(argv) >= 2 and argv[1] == '--processing-fork'
    if forking:
        assert len(argv) == 3
    return forking
def freezeSupport():
    '''
    Run code for process object if this is not the main process
    '''
    # If this invocation is a spawned child (see is_forking), run the
    # child's code via main() and exit instead of returning to the
    # caller's module-level code.
    if is_forking(sys.argv):
        main()
        sys.exit()
def get_commandline():
    '''
    Returns prefix of commandline used for spawning a child process
    '''
    # guard against fork bombs: a process that has not finished
    # bootstrapping (identity still ()) must not spawn children itself
    if processing.currentProcess()._identity==() and is_forking(sys.argv):
        raise RuntimeError, '''
        Attempt to start a new process before the current process
        has finished its bootstrapping phase.
        This probably means that you are on Windows and you have
        forgotten to use the proper idiom in the main module:
            if __name__ == '__main__':
                freezeSupport()
                ...
        The "freezeSupport()" line can be omitted if the program
        is not going to be frozen to produce a Windows executable.'''
    prog = 'from processing.forking import main; main()'
    if getattr(sys, 'frozen', False):
        # frozen executable: it detects --processing-fork itself
        return [sys.executable, '--processing-fork']
    elif sys.executable.lower().endswith('pythonservice.exe'):
        # running as a Windows service: use the real python.exe instead
        exe = os.path.join(os.path.dirname(os.__file__),'..','python.exe')
        return [exe, '-c', prog, '--processing-fork']
    else:
        return [sys.executable, '-c', prog, '--processing-fork']
def get_preparation_data(name):
    '''
    Return info about parent needed by child to unpickle process object
    '''
    if sys.argv[0] not in ('', '-c') and not WINEXE:
        mainpath = getattr(sys.modules['__main__'], '__file__', None)
        if mainpath is not None and not os.path.isabs(mainpath):
            # we will assume os.chdir() was not used between program
            # start up and the first import of processing
            mainpath = os.path.join(processing.ORIGINAL_DIR, mainpath)
    else:
        mainpath = None
    # layout matches prepare()'s parameters; cur_dir is always None here
    return [name, mainpath, sys.path, sys.argv, None,
            processing.ORIGINAL_DIR]
def prepare(name, mainpath, sys_path, sys_argv,
            cur_dir=None, orig_dir=None):
    '''
    Try get this process ready to unpickle process object
    '''
    global original_main_module
    # keep a reference to the child's original __main__ before it is
    # replaced below
    original_main_module = sys.modules['__main__']
    processing.currentProcess().setName(name)
    if orig_dir is not None:
        processing.ORIGINAL_DIR = orig_dir
    if cur_dir is not None:
        try:
            os.chdir(cur_dir)
        except OSError:
            raise
    if sys_path is not None:
        sys.path = sys_path
    if mainpath is not None:
        mainname = splitext(basename(mainpath))[0]
        if mainname == '__init__':
            # path points at a package -- use the package directory name
            mainname = basename(dirname(mainpath))
        if not mainpath.lower().endswith('.exe') and mainname != 'ipython':
            if mainpath is None:
                # NOTE(review): dead branch -- mainpath cannot be None
                # inside this block
                dirs = None
            elif basename(mainpath).startswith('__init__.py'):
                dirs = [dirname(dirname(mainpath))]
            else:
                dirs = [dirname(mainpath)]
            assert mainname not in sys.modules, mainname
            file, pathname, etc = imp.find_module(mainname, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here.  However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module(
                    '__parents_main__', file, pathname, etc
                )
            finally:
                if file:
                    file.close()
            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'
            # XXX Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- ugly
            for obj in main_module.__dict__.values():
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    # best-effort: ignore objects without __module__
                    pass
    if sys_argv is not None: # this needs to come last
        sys.argv = sys_argv
def main():
    '''
    Run code specified by data received over pipe
    '''
    # Child-process entry point: read preparation data and the pickled
    # process object from the inherited pipe handle, then run it.
    assert is_forking(sys.argv)
    # the last command line argument is the inherited pipe read-handle
    handle = int(sys.argv[-1])
    fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
    from_parent = os.fdopen(fd, 'rb')
    # _inheriting enables unpickling of inheritance-only objects
    processing.currentProcess()._inheriting = True
    preparation_data = load(from_parent)
    prepare(*preparation_data)
    self = load(from_parent)
    processing.currentProcess()._inheriting = False
    from_parent.close()
    exitcode = self._bootstrap()
    win32.ExitProcess(exitcode)
/*
* Extension module used by `processing` package
*
* processing.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "processing.h"
PyObject *create_win32_namespace(void);
PyObject *dumpsFunction, *loadsFunction, *protocol;
PyObject *ProcessError, *BufferTooShort;
/*
* Function which raises exceptions based on error codes
*/
/*
 * Raise a Python exception corresponding to the error code `num`.
 *
 * `Type` may be NULL, in which case a default exception type is chosen
 * for the OS-error cases.  Always returns NULL so callers can write
 * `return SetException(...);`.
 */
PyObject *
SetException(PyObject *Type, int num)
{
    switch (num) {
#ifdef MS_WINDOWS
    case STANDARD_ERROR:
        if (Type == NULL)
            Type = PyExc_WindowsError;
        PyErr_SetExcFromWindowsErr(Type, 0);
        break;
    case WSA_ERROR:
        if (Type == NULL)
            Type = PyExc_WindowsError;
        PyErr_SetExcFromWindowsErr(Type, WSAGetLastError());
        break;
#else /* !MS_WINDOWS */
    case STANDARD_ERROR:
        if (Type == NULL)
            Type = PyExc_OSError;
        /* BUG FIX: previously called PyErr_SetFromErrno(PyExc_IOError),
           ignoring the Type selected above */
        PyErr_SetFromErrno(Type);
        break;
#endif /* !MS_WINDOWS */
    case MEMORY_ERROR:
        PyErr_NoMemory();
        break;
    case END_OF_FILE:
        PyErr_SetNone(PyExc_EOFError);
        break;
    case EARLY_END_OF_FILE:
        PyErr_SetString(PyExc_IOError, "got end of file during message");
        break;
    case BAD_MESSAGE_LENGTH:
        PyErr_SetString(PyExc_IOError, "bad message length");
        break;
    case EXCEPTION_HAS_BEEN_SET:
        /* the callee already set a Python exception */
        return NULL;
    default:
        /* typo fix: "unkown" -> "unknown" */
        PyErr_Format(PyExc_RuntimeError, "unknown number: %d", num);
    }
    return NULL;
}
/*
* Windows only
*/
#ifdef MS_WINDOWS
/* On Windows we set an event to signal Ctrl-C; compare with timemodule.c */
HANDLE hInterruptEvent = NULL;
long main_thread = 0;
/* Console control handler: set the interrupt event so blocking waits
   can wake up on Ctrl-C; returning FALSE lets the next handler (which
   raises KeyboardInterrupt) run as well. */
static BOOL WINAPI
ProcessingCtrlHandler(DWORD dwCtrlType)
{
    SetEvent(hInterruptEvent);
    return FALSE;
}
/* Duplicate a handle -- also works on windows sockets */
/* Duplicate a handle -- also works on windows sockets.
   Returns INVALID_HANDLE_VALUE on failure. */
HANDLE
duplicate_handle(HANDLE h)
{
    HANDLE dup_h;
    BOOL success = DuplicateHandle(
        GetCurrentProcess(), h, GetCurrentProcess(),
        &dup_h, 0, FALSE, DUPLICATE_SAME_ACCESS
        );
    return success ? dup_h : INVALID_HANDLE_VALUE;
}
/* On Windows we provide alternative to socket.fromfd() */
#if defined(MS_WINDOWS) && PY_VERSION_HEX < 0x02060000
/* Mirror of socketmodule.c's PySocketSockObject layout for Python
   < 2.6 -- must stay in sync with the interpreter's definition, since
   processing_changefd() pokes these fields directly. */
typedef struct {
    PyObject_HEAD
    SOCKET sock_fd;                  /* the socket's descriptor */
    int sock_family;                 /* address family */
    int sock_type;                   /* socket type */
    int sock_proto;                  /* protocol */
    PyObject *(*errorhandler)(void);
    double sock_timeout;             /* timeout in seconds */
} PySocketSockObject;
/* changefd(sock, fd, family, type[, proto]):
   make socket object `sock` use a duplicate of descriptor `fd`,
   closing whatever descriptor it previously owned. */
PyObject *
processing_changefd(PyObject *self, PyObject *args)
{
    PySocketSockObject *s;
    int family, type, proto=0;
    SOCKET fd, newfd;
    if (!PyArg_ParseTuple(args, "Oiii|i", &s, &fd, &family, &type, &proto))
        return NULL;
    /* Note INVALID_HANDLE_VALUE == INVALID_SOCKET == -1 (modulo casting) */
    newfd = (SOCKET)duplicate_handle((HANDLE)fd);
    if (newfd == INVALID_SOCKET) {
        PyErr_SetString(PyExc_OSError, "failed to duplicate socket handle");
        return NULL;
    }
    /* close the socket's previous descriptor, if any */
    if (s->sock_fd != INVALID_SOCKET) {
        Py_BEGIN_ALLOW_THREADS
        closesocket(s->sock_fd);
        Py_END_ALLOW_THREADS
    }
    s->sock_fd = newfd;
    s->sock_family = family;
    s->sock_type = type;
    s->sock_proto = proto;
    Py_RETURN_NONE;
}
#endif /* defined(MS_WINDOWS) && PY_VERSION_HEX < 0x02060000 */
/*
* Unix only
*/
#else /* !MS_WINDOWS */
#if HAVE_FD_TRANSFER
/* Functions for transferring file descriptors between processes.
Reimplements some of the functionality of the `fdcred`
module at `http://www.mca-ltd.com/resources/fdcred_1.tgz`. */
/* Ancillary-data message carrying one file descriptor (SCM_RIGHTS).
   NOTE(review): fully portable code would size the control buffer with
   CMSG_SPACE()/CMSG_LEN() rather than relying on this struct layout. */
struct fd_control_message {
    struct cmsghdr hdr;
    int fd;
};
/* sendfd(sockfd, fd) -> None
   Send file descriptor `fd` over the unix domain socket `conn` as
   SCM_RIGHTS ancillary data; a single dummy byte is the payload. */
static PyObject *
processing_sendfd(PyObject *self, PyObject *args)
{
    int conn, fd, res;
    char dummy_char;
    struct fd_control_message fdmsg;
    struct iovec dummy_iov;
    struct msghdr msg = {0};
    if (!PyArg_ParseTuple(args, "ii", &conn, &fd))
        return NULL;
    /* build the SCM_RIGHTS control message carrying the descriptor */
    fdmsg.fd = fd;
    fdmsg.hdr.cmsg_level = SOL_SOCKET;
    fdmsg.hdr.cmsg_type = SCM_RIGHTS;
    fdmsg.hdr.cmsg_len = sizeof(fdmsg);
    /* sendmsg() requires at least one byte of normal data */
    dummy_iov.iov_base = &dummy_char;
    dummy_iov.iov_len = 1;
    msg.msg_control = &fdmsg;
    msg.msg_controllen = sizeof(fdmsg);
    msg.msg_iov = &dummy_iov;
    msg.msg_iovlen = 1;
    Py_BEGIN_ALLOW_THREADS
    res = sendmsg(conn, &msg, 0);
    Py_END_ALLOW_THREADS
    if (res < 0)
        return PyErr_SetFromErrno(PyExc_IOError);
    Py_RETURN_NONE;
}
/* recvfd(sockfd) -> fd
   Receive a file descriptor sent as SCM_RIGHTS ancillary data over the
   unix domain socket `conn` (counterpart of processing_sendfd). */
static PyObject *
processing_recvfd(PyObject *self, PyObject *args)
{
    int conn, res;
    char dummy_char;
    struct fd_control_message fdmsg;
    struct iovec dummy_iov;
    struct msghdr msg = {0};
    if (!PyArg_ParseTuple(args, "i", &conn))
        return NULL;
    /* -1 marks "no descriptor received" until recvmsg fills it in */
    fdmsg.fd = -1;
    fdmsg.hdr.cmsg_level = SOL_SOCKET;
    fdmsg.hdr.cmsg_type = SCM_RIGHTS;
    fdmsg.hdr.cmsg_len = sizeof(fdmsg);
    /* one byte of ordinary data accompanies the control message */
    dummy_iov.iov_base = &dummy_char;
    dummy_iov.iov_len = 1;
    msg.msg_control = &fdmsg;
    msg.msg_controllen = sizeof(fdmsg);
    msg.msg_iov = &dummy_iov;
    msg.msg_iovlen = 1;
    Py_BEGIN_ALLOW_THREADS
    res = recvmsg(conn, &msg, 0);
    Py_END_ALLOW_THREADS
    if (res < 0)
        return PyErr_SetFromErrno(PyExc_IOError);
    return Py_BuildValue("i", fdmsg.fd);
}
#endif /* HAVE_FD_TRANSFER */
#endif /* !MS_WINDOWS */
/*
* All platforms
*/
/* rwbuffer(obj[, offset[, size]]) -> buffer
 *
 * Return a writable buffer object viewing `obj` (old Python-2 buffer API).
 * `size` defaults to Py_END_OF_BUFFER, i.e. the remainder of the object.
 */
static PyObject*
processing_rwbuffer(PyObject *self, PyObject *args)
{
    PyObject *obj;
    Py_ssize_t offset = 0, size = Py_END_OF_BUFFER;

    if (!PyArg_ParseTuple(args, "O|" F_PY_SSIZE_T F_PY_SSIZE_T,
                          &obj, &offset, &size))
        return NULL;
    return PyBuffer_FromReadWriteObject(obj, offset, size);
}
/* address_of_buffer(obj) -> (address, length)
 *
 * Return the raw address and byte length of the writable memory backing
 * `obj`; `obj` must support the (old-style) writable buffer interface.
 */
static PyObject*
processing_address_of_buffer(PyObject *self, PyObject *obj)
{
    void *ptr;
    Py_ssize_t nbytes;

    if (PyObject_AsWriteBuffer(obj, &ptr, &nbytes) < 0)
        return NULL;
    return Py_BuildValue(F_POINTER F_PY_SSIZE_T, ptr, nbytes);
}
/*
* Function table
*/
/* Method table for the `_processing` module.
 * Fixes docstring typos ("inteface", "decriptor") and documents the
 * missing leading socket argument of changefd(). */
static PyMethodDef module_methods[] = {
    {"rwbuffer", processing_rwbuffer, METH_VARARGS,
     "rwbuffer(obj [, offset[, size]]) -> buffer\n"
     "Create a writable view of obj assuming obj supports buffer interface"},
    {"address_of_buffer", processing_address_of_buffer, METH_O,
     "address_of_buffer(obj) -> integer\n"
     "Return address of obj assuming obj supports buffer interface"},
#if HAVE_FD_TRANSFER
    {"sendfd", processing_sendfd, METH_VARARGS,
     "sendfd(sockfd, fd) -> None\n"
     "Send file descriptor given by fd over the unix domain socket\n"
     "whose file descriptor is sockfd"},
    {"recvfd", processing_recvfd, METH_VARARGS,
     "recvfd(sockfd) -> fd\n"
     "Receive a file descriptor over a unix domain socket\n"
     "whose file descriptor is sockfd"},
#endif
#if defined(MS_WINDOWS) && PY_VERSION_HEX < 0x02060000
    {"changefd", (PyCFunction)processing_changefd, METH_VARARGS,
     "changefd(sock, fd, family, type [, proto]) -> None\n"
     "Replace the file descriptor etc of an existing socket object\n"
     "the old fd is closed, and replaced with a duplicate of fd"},
#endif
    {NULL}
};
/*
* Initialize
*/
/* Module initialization for `_processing` (Python 2 style).
 *
 * On any failure an exception is left set and we return early; Python
 * reports it as an ImportError.  Fix: the three PyObject_GetAttrString()
 * results were previously used without NULL checks.
 */
PyMODINIT_FUNC
init_processing(void)
{
    PyObject *module, *temp;

    /* Initialize module */
    module = Py_InitModule("_processing", module_methods);
    if (!module)
        return;

    /* Get copy of objects from cPickle */
    temp = PyImport_ImportModule("cPickle");
    if (!temp)
        return;
    dumpsFunction = PyObject_GetAttrString(temp, "dumps");
    loadsFunction = PyObject_GetAttrString(temp, "loads");
    protocol = PyObject_GetAttrString(temp, "HIGHEST_PROTOCOL");
    Py_XDECREF(temp);
    /* Bail out if any lookup failed -- AttributeError is already set */
    if (!dumpsFunction || !loadsFunction || !protocol)
        return;

    /* Add ProcessError to module */
    ProcessError = PyErr_NewException("_processing.ProcessError", NULL, NULL);
    if (!ProcessError)
        return;
    Py_INCREF(ProcessError);
    PyModule_AddObject(module, "ProcessError", ProcessError);

    /* Add BufferTooShort (subclass of ProcessError) to module */
    BufferTooShort = PyErr_NewException("_processing.BufferTooShort",
                                        ProcessError, NULL);
    if (!BufferTooShort)
        return;
    Py_INCREF(BufferTooShort);
    PyModule_AddObject(module, "BufferTooShort", BufferTooShort);

    /* Add connection type to module */
    if (PyType_Ready(&ConnectionType) < 0)
        return;
    Py_INCREF(&ConnectionType);
    PyModule_AddObject(module, "Connection", (PyObject*)&ConnectionType);

#if defined(MS_WINDOWS) || HAVE_SEM_OPEN
    /* Add SemLock type to module */
    if (PyType_Ready(&SemLockType) < 0)
        return;
    Py_INCREF(&SemLockType);
    PyModule_AddObject(module, "SemLock", (PyObject*)&SemLockType);
#endif

#ifdef MS_WINDOWS
    /* Add PipeConnection to module */
    if (PyType_Ready(&PipeConnectionType) < 0)
        return;
    Py_INCREF(&PipeConnectionType);
    PyModule_AddObject(module,"PipeConnection",(PyObject*)&PipeConnectionType);

    /* Initialize win32 class and add to processing */
    temp = create_win32_namespace();
    if (!temp)
        return;
    PyModule_AddObject(module, "win32", temp);

    /* Initialize the event handle used to signal Ctrl-C */
    main_thread = GetCurrentThreadId();   /* hope not imported by subthread */
    hInterruptEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
    if (!hInterruptEvent) {
        PyErr_SetFromWindowsErr(0);
        return;
    }
    if (!SetConsoleCtrlHandler(ProcessingCtrlHandler, TRUE)) {
        PyErr_SetFromWindowsErr(0);
        return;
    }
#endif
}
/*
* A type which wraps a semaphore
*
* semaphore.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "processing.h"
/* The three flavours a SemLock can wrap */
enum { RECURSIVE_MUTEX, SEMAPHORE, BOUNDED_SEMAPHORE };

/* Python object wrapping one OS-level semaphore handle */
typedef struct {
    PyObject_HEAD
    SEM_HANDLE handle;    /* native semaphore handle */
    long last_tid;        /* thread id of the last successful acquirer */
    int count;            /* acquire() minus release() count, this process */
    int maxvalue;         /* upper bound (bounded semaphore / mutex) */
    int kind;             /* one of the enum values above */
} SemLock;

/* True when the calling thread currently holds the (recursive) lock */
#define ISMINE(o) (o->count > 0 && PyThread_get_thread_ident() == o->last_tid)
#ifdef MS_WINDOWS
/*
* Windows definitions
*/
/* Inheritable security attributes so child processes inherit the handle */
static SECURITY_ATTRIBUTES sa = {sizeof(SECURITY_ATTRIBUTES), NULL, TRUE};

/* Map the generic SEM_* interface onto the Win32 semaphore API.
   Win32 semaphores are anonymous here, so the name is ignored and
   SEM_UNLINK is a no-op. */
#define SEM_FAILED NULL
#define SEM_CLEAR_ERROR() SetLastError(0)
#define SEM_GET_LAST_ERROR() GetLastError()
#define SEM_CREATE(name, val, max) CreateSemaphore(&sa, val, max, NULL)
#define SEM_POST(sem) (ReleaseSemaphore(sem, 1, NULL) ? 0 : -1)
#define SEM_CLOSE(sem) (CloseHandle(sem) ? 0 : -1)
#define SEM_GETVALUE(sem, pval) _SemLock_GetSemaphoreValue(sem, pval)
#define SEM_UNLINK(name) 0
/* Win32 has no "get semaphore value" call, so probe it: try a zero-timeout
 * wait; on success release immediately and use the previous-count that
 * ReleaseSemaphore reports (+1 for the slot we just took back).  A timeout
 * means the counter is 0.  Returns 0 on success, STANDARD_ERROR on failure.
 * Note this briefly acquires the semaphore, so the value is only advisory.
 */
static int
_SemLock_GetSemaphoreValue(HANDLE handle, long *value)
{
    long previous;

    switch (WaitForSingleObject(handle, 0)) {
    case WAIT_OBJECT_0:
        if (!ReleaseSemaphore(handle, 1, &previous))
            return STANDARD_ERROR;
        *value = previous + 1;
        return 0;
    case WAIT_TIMEOUT:
        *value = 0;
        return 0;
    default:
        return STANDARD_ERROR;
    }
}
/* acquire(block=True, timeout=None) -> bool  (Windows version)
 *
 * Returns True if the semaphore/lock was acquired, False on timeout.
 * For a recursive mutex already held by this thread the count is simply
 * bumped.  When called from the main thread we also wait on
 * hInterruptEvent so that Ctrl-C can interrupt the wait (the handler
 * signals that event); in that case we emulate an EINTR like timemodule.c.
 */
static PyObject *
SemLock_acquire(SemLock *self, PyObject *args, PyObject *kwds)
{
    int blocking = 1;
    double timeout;
    PyObject *timeout_obj = Py_None;
    DWORD res, dwTimeout;
    static char *kwlist[] = {"block", "timeout", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist,
                                     &blocking, &timeout_obj))
        return NULL;

    /* re-entrant acquisition of a recursive mutex needs no OS wait */
    if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) {
        ++self->count;
        Py_RETURN_TRUE;
    }

    /* work out timeout: non-blocking -> 0, None -> INFINITE, else msecs */
    if (!blocking) {
        dwTimeout = 0;
    } else if (timeout_obj == Py_None) {
        dwTimeout = INFINITE;
    } else {
        timeout = PyFloat_AsDouble(timeout_obj);
        if (PyErr_Occurred())
            return NULL;
        timeout *= 1000.0;      /* convert to millisecs */
        if (timeout < 0.0) {
            timeout = 0.0;
        } else if (timeout >= ((double)INFINITE - 0.5)) {
            PyErr_SetString(PyExc_OverflowError, "timeout is too large");
            return NULL;
        }
        dwTimeout = (DWORD)(timeout + 0.5);
    }

    /* do the wait */
    if (dwTimeout == 0) {
        /* zero timeout never blocks, so no need to release the GIL */
        res = WaitForSingleObject(self->handle, 0);
    } else if (main_thread == GetCurrentThreadId()) {
        /* fast path first; only fall into the interruptible wait if busy */
        res = WaitForSingleObject(self->handle, 0);
        if (res == WAIT_TIMEOUT) {
            HANDLE handles[2] = {self->handle, hInterruptEvent};
            ResetEvent(hInterruptEvent);
            Py_BEGIN_ALLOW_THREADS
            res = WaitForMultipleObjects(2, handles, FALSE, dwTimeout);
            Py_END_ALLOW_THREADS
        }
    } else {
        Py_BEGIN_ALLOW_THREADS
        res = WaitForSingleObject(self->handle, dwTimeout);
        Py_END_ALLOW_THREADS
    }

    /* handle result */
    switch (res) {
    case WAIT_TIMEOUT:
        Py_RETURN_FALSE;
    case WAIT_OBJECT_0:
        /* acquired: record ownership for ISMINE() */
        self->last_tid = GetCurrentThreadId();
        ++self->count;
        Py_RETURN_TRUE;
    case (WAIT_OBJECT_0 + 1):   /* we got SIGINT; do like in timemodule.c */
        Sleep(1);
        errno = EINTR;
        PyErr_SetFromErrno(PyExc_OSError);
        return NULL;
    case WAIT_FAILED:
        return PyErr_SetFromWindowsErr(0);
    default:
        PyErr_SetString(PyExc_RuntimeError, "WaitForSingleObject() or "
                        "WaitForMultipleObjects() gave unrecognized value");
        return NULL;
    }
}
/* release() -> None  (Windows version)
 *
 * For a recursive mutex, enforce ownership and unwind nested acquisitions
 * before touching the OS semaphore.  For plain/bounded semaphores map
 * ERROR_TOO_MANY_POSTS to a ValueError.
 */
static PyObject *
SemLock_release(SemLock *self, PyObject *args)
{
    if (self->kind == RECURSIVE_MUTEX) {
        if (!ISMINE(self)) {
            PyErr_SetString(PyExc_AssertionError, "attempt to release "
                            "recursive lock not owned by thread");
            return NULL;
        }
        /* nested release: just decrement, keep the OS semaphore held */
        if (self->count > 1) {
            --self->count;
            Py_RETURN_NONE;
        }
        assert(self->count == 1);
    }

    if (!ReleaseSemaphore(self->handle, 1, NULL)) {
        if (GetLastError() == ERROR_TOO_MANY_POSTS) {
            PyErr_SetString(PyExc_ValueError,
                            "semaphore or lock released too many times");
            return NULL;
        } else {
            return PyErr_SetFromWindowsErr(0);
        }
    }
    --self->count;
    Py_RETURN_NONE;
}
#else /* !MS_WINDOWS */
/*
* Unix definitions
*/
/* Map the generic SEM_* interface onto POSIX named semaphores.
   Errors are reported through errno, so the CLEAR/GET_LAST_ERROR hooks
   are no-ops here. */
#define SEM_CLEAR_ERROR()
#define SEM_GET_LAST_ERROR() 0
#define SEM_CREATE(name, val, max) sem_open(name, O_CREAT | O_EXCL, 0600, val)
#define SEM_POST(sem) sem_post(sem)
#define SEM_CLOSE(sem) sem_close(sem)
#define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval)
#define SEM_UNLINK(name) sem_unlink(name)

/* Some platforms ship a sem_unlink() that does not work; pretend success */
#if HAVE_BROKEN_SEM_UNLINK
#  define sem_unlink(name) 0
#endif
#if !HAVE_SEM_TIMEDWAIT
/* Emulation of sem_timedwait() for platforms lacking it: poll
 * sem_trywait() with an exponentially growing sleep (capped at 20ms),
 * checking for Python signals between polls.  `_save` is the thread
 * state stored by the caller's Py_BEGIN_ALLOW_THREADS, needed so we can
 * briefly re-acquire the GIL for PyErr_CheckSignals().
 * Returns 0 on acquisition, STANDARD_ERROR with errno set on error or
 * timeout, or EXCEPTION_HAS_BEEN_SET if a signal handler raised.
 */
#  define sem_timedwait(sem,deadline) sem_timedwait_save(sem,deadline,_save)

int
sem_timedwait_save(sem_t *sem, struct timespec *deadline, PyThreadState *_save)
{
    int res;
    unsigned long delay, difference;
    struct timeval now, tvdeadline, tvdelay;

    errno = 0;
    tvdeadline.tv_sec = deadline->tv_sec;
    tvdeadline.tv_usec = deadline->tv_nsec / 1000;

    for (delay = 0 ; ; delay += 1000) {
        /* poll */
        if (sem_trywait(sem) == 0)
            return 0;
        else if (errno != EAGAIN)
            return STANDARD_ERROR;

        /* get current time */
        if (gettimeofday(&now, NULL) < 0)
            return STANDARD_ERROR;

        /* check for timeout */
        if (tvdeadline.tv_sec < now.tv_sec ||
            (tvdeadline.tv_sec == now.tv_sec &&
             tvdeadline.tv_usec <= now.tv_usec)) {
            errno = ETIMEDOUT;
            return STANDARD_ERROR;
        }

        /* calculate how much time is left */
        difference = (tvdeadline.tv_sec - now.tv_sec) * 1000000 +
            (tvdeadline.tv_usec - now.tv_usec);

        /* check delay not too long -- maximum is 20 msecs */
        if (delay > 20000)
            delay = 20000;
        if (delay > difference)
            delay = difference;

        /* sleep */
        tvdelay.tv_sec = delay / 1000000;
        tvdelay.tv_usec = delay % 1000000;
        if (select(0, NULL, NULL, NULL, &tvdelay) < 0)
            return STANDARD_ERROR;

        /* check for signals: need the GIL for PyErr_CheckSignals() */
        Py_BLOCK_THREADS
        res = PyErr_CheckSignals();
        Py_UNBLOCK_THREADS

        if (res) {
            errno = EINTR;
            return EXCEPTION_HAS_BEEN_SET;
        }
    }
}
#endif /* !HAVE_SEM_TIMEDWAIT */
/* acquire(block=True, timeout=None) -> bool  (Unix version)
 *
 * Returns True on acquisition, False on timeout.  A recursive mutex
 * already held by this thread only bumps the count.  When a timeout is
 * given, it is converted to an absolute deadline for sem_timedwait().
 * EINTR waits are retried unless a signal handler raised an exception.
 */
static PyObject *
SemLock_acquire(SemLock *self, PyObject *args, PyObject *kwds)
{
    int blocking = 1, res;
    double timeout;
    PyObject *timeout_obj = Py_None;
    struct timespec deadline = {0};
    struct timeval now;
    long sec, nsec;
    static char *kwlist[] = {"block", "timeout", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO", kwlist,
                                     &blocking, &timeout_obj))
        return NULL;

    /* re-entrant acquisition of a recursive mutex needs no OS wait */
    if (self->kind == RECURSIVE_MUTEX && ISMINE(self)) {
        ++self->count;
        Py_RETURN_TRUE;
    }

    /* convert relative timeout into an absolute deadline */
    if (timeout_obj != Py_None) {
        timeout = PyFloat_AsDouble(timeout_obj);
        if (PyErr_Occurred())
            return NULL;
        if (timeout < 0.0)
            timeout = 0.0;

        if (gettimeofday(&now, NULL) < 0) {
            PyErr_SetFromErrno(PyExc_OSError);
            return NULL;
        }
        sec = (long) timeout;
        nsec = (long) (1e9 * (timeout - sec) + 0.5);
        deadline.tv_sec = now.tv_sec + sec;
        deadline.tv_nsec = now.tv_usec * 1000 + nsec;
        /* normalize so tv_nsec < 1e9 */
        deadline.tv_sec += (deadline.tv_nsec / 1000000000);
        deadline.tv_nsec %= 1000000000;
    }

    /* wait, retrying on EINTR unless a signal handler raised */
    do {
        Py_BEGIN_ALLOW_THREADS
        if (blocking && timeout_obj == Py_None)
            res = sem_wait(self->handle);
        else if (!blocking)
            res = sem_trywait(self->handle);
        else
            res = sem_timedwait(self->handle, &deadline);
        Py_END_ALLOW_THREADS

        if (res == EXCEPTION_HAS_BEEN_SET)
            break;
    } while (res < 0 && errno == EINTR && !PyErr_CheckSignals());

    if (res < 0) {
        if (errno == EAGAIN || errno == ETIMEDOUT)
            Py_RETURN_FALSE;          /* not acquired within the timeout */
        else if (errno == EINTR)
            return NULL;              /* exception already set above */
        else
            return PyErr_SetFromErrno(PyExc_OSError);
    }

    /* acquired: record ownership for ISMINE() */
    ++self->count;
    self->last_tid = PyThread_get_thread_ident();
    Py_RETURN_TRUE;
}
/* release() -> None  (Unix version)
 *
 * Recursive mutex: enforce ownership and unwind nesting before posting.
 * Bounded semaphore: best-effort over-release check (inherently racy, see
 * comment below) since POSIX semaphores have no upper bound of their own.
 */
static PyObject *
SemLock_release(SemLock *self, PyObject *args)
{
    int sval;

    switch (self->kind) {
    case RECURSIVE_MUTEX:
        if (!ISMINE(self)) {
            PyErr_SetString(PyExc_AssertionError, "attempt to release "
                            "recursive lock not owned by thread");
            return NULL;
        }
        /* nested release: just decrement, keep the OS semaphore held */
        if (self->count > 1) {
            --self->count;
            Py_RETURN_NONE;
        }
        assert(self->count == 1);
        break;
    case BOUNDED_SEMAPHORE:
        /* This check is unreliable since value may change before post.
           However, it will only fail if something has gone wrong. */
        if (sem_getvalue(self->handle, &sval) < 0)
            return PyErr_SetFromErrno(PyExc_OSError);
        if (sval >= self->maxvalue) {
            PyErr_SetString(PyExc_ValueError,
                            "semaphore or lock released too many times");
            return NULL;
        }
    }

    if (sem_post(self->handle) < 0)
        return PyErr_SetFromErrno(PyExc_OSError);

    --self->count;
    Py_RETURN_NONE;
}
#endif /* !MS_WINDOWS */
/*
* All platforms
*/
/* Allocate and initialize a SemLock instance around an existing native
 * semaphore handle.  Returns a new reference, or NULL on allocation
 * failure (with the exception set by tp_alloc). */
static PyObject *
_SemLock_create(PyTypeObject *type, SEM_HANDLE handle, int kind, int maxvalue)
{
    SemLock *obj = (SemLock*)type->tp_alloc(type, 0);
    if (obj == NULL)
        return NULL;

    obj->handle = handle;
    obj->kind = kind;
    obj->maxvalue = maxvalue;
    obj->count = 0;       /* no acquisitions yet in this process */
    obj->last_tid = 0;    /* no owning thread yet */
    return (PyObject*)obj;
}
/* tp_new for SemLock: SemLock(kind, value)
 *
 * Creates a fresh process-shared semaphore with a name unique to this
 * (pid, counter) pair, then immediately unlinks the name so nothing is
 * left in the filesystem namespace.  Fixes the error-message typo
 * "unrecongnized" -> "unrecognized".
 */
static PyObject *
SemLock_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    char buffer[256];
    SEM_HANDLE handle = SEM_FAILED;
    int kind, maxvalue, value;
    PyObject *result;
    static char *kwlist[] = {"kind", "value", NULL};
    static int counter = 0;

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwlist, &kind, &value))
        return NULL;

    if (kind < RECURSIVE_MUTEX || kind > BOUNDED_SEMAPHORE) {
        PyErr_SetString(PyExc_ValueError, "unrecognized blocker type");
        return NULL;
    }

    /* unique name per (pid, counter); unlinked again right below */
    PyOS_snprintf(buffer, sizeof(buffer), "/pys-%d-%d", getpid(), counter++);

    if (kind == BOUNDED_SEMAPHORE)
        maxvalue = value;
    else if (kind == RECURSIVE_MUTEX)
        maxvalue = 1;
    else
        maxvalue = INT_MAX;

    SEM_CLEAR_ERROR();
    handle = SEM_CREATE(buffer, value, maxvalue);
    /* On Windows we should fail if GetLastError() == ERROR_ALREADY_EXISTS */
    if (handle == SEM_FAILED || SEM_GET_LAST_ERROR() != 0)
        goto failure;

    if (SEM_UNLINK(buffer) < 0)
        goto failure;

    result = _SemLock_create(type, handle, kind, maxvalue);
    if (!result)
        goto failure;
    return result;

  failure:
    if (handle != SEM_FAILED)
        SEM_CLOSE(handle);
    SetException(NULL, STANDARD_ERROR);
    return NULL;
}
/* _rebuild(handle, kind, maxvalue) -> SemLock   (classmethod)
 *
 * Reconstruct a SemLock around an existing handle; used when unpickling
 * in a child process.  The handle is adopted, not duplicated.
 */
static PyObject *
SemLock_rebuild(PyTypeObject *type, PyObject *args)
{
    SEM_HANDLE handle;
    int kind, maxvalue;

    if (!PyArg_ParseTuple(args, F_SEM_HANDLE "ii", &handle, &kind, &maxvalue))
        return NULL;
    return _SemLock_create(type, handle, kind, maxvalue);
}
/* tp_dealloc: close the native handle (if valid) and free the object */
static void
SemLock_dealloc(SemLock* self)
{
    if (self->handle != SEM_FAILED)
        SEM_CLOSE(self->handle);
    self->ob_type->tp_free((PyObject*)self);
}

/* _count() -> int: net acquire()s minus release()s in this process */
static PyObject *
SemLock_count(SemLock *self)
{
    return PyInt_FromLong((long)self->count);
}

/* _ismine() -> bool: whether the calling thread holds the lock */
static PyObject *
SemLock_ismine(SemLock *self)
{
    /* only makes sense for a lock */
    return PyBool_FromLong(ISMINE(self));
}

/* _getvalue() -> int: current semaphore counter value.
 * NOTE(review): a SEM_GETVALUE failure sets an exception but still falls
 * through and returns PyInt_FromLong of an uninitialized sval -- confirm
 * whether callers rely on that; an early `return NULL` looks intended. */
static PyObject *
SemLock_getvalue(SemLock *self)
{
    int sval;
    if (SEM_GETVALUE(self->handle, &sval) < 0)
        SetException(NULL, STANDARD_ERROR);
    return PyInt_FromLong((long)sval);
}

/* _afterfork() -> None: reset the per-process acquisition count, since a
 * forked child has not really acquired anything */
static PyObject *
SemLock_afterfork(SemLock *self)
{
    self->count = 0;
    Py_RETURN_NONE;
}
/*
* Semaphore methods
*/
/* Method table.  acquire/__enter__ take keywords (block, timeout);
   bare METH_KEYWORDS is the old Python-2 spelling. */
static PyMethodDef SemLock_methods[] = {
    {"acquire", (PyCFunction)SemLock_acquire, METH_KEYWORDS,
     "acquire the semaphore/lock"},
    {"release", (PyCFunction)SemLock_release, METH_NOARGS,
     "release the semaphore/lock"},
    {"__enter__", (PyCFunction)SemLock_acquire, METH_KEYWORDS,
     "enter the semaphore/lock"},
    {"__exit__", (PyCFunction)SemLock_release, METH_VARARGS,
     "exit the semaphore/lock"},
    {"_count", (PyCFunction)SemLock_count, METH_NOARGS,
     "number of `acquire()`s minus number of `release()`s for this process"},
    {"_ismine", (PyCFunction)SemLock_ismine, METH_NOARGS,
     "whether the lock is owned by this thread"},
    {"_getvalue", (PyCFunction)SemLock_getvalue, METH_NOARGS,
     "get the value of the semaphore"},
    {"_rebuild", (PyCFunction)SemLock_rebuild, METH_VARARGS | METH_CLASS,
     ""},
    {"_afterfork", (PyCFunction)SemLock_afterfork, METH_NOARGS,
     "rezero the net acquisition count after fork()"},
    {NULL}
};
/*
* Member table
*/
/* Read-only attributes exposing the native handle and configuration */
static PyMemberDef SemLock_members[] = {
    {"handle", T_SEM_HANDLE, offsetof(SemLock, handle), READONLY, ""},
    {"kind", T_INT, offsetof(SemLock, kind), READONLY, ""},
    {"maxvalue", T_INT, offsetof(SemLock, maxvalue), READONLY, ""},
    {NULL}
};
/*
* Semaphore type
*/
/* Type object for _processing.SemLock (Python 2 layout) */
PyTypeObject SemLockType = {
    PyObject_HEAD_INIT(NULL)
    0,                          /* ob_size */
    "_processing.SemLock",      /* tp_name */
    sizeof(SemLock),            /* tp_basicsize */
    0,                          /* tp_itemsize */
    (destructor)SemLock_dealloc,
                                /* tp_dealloc */
    0,                          /* tp_print */
    0,                          /* tp_getattr */
    0,                          /* tp_setattr */
    0,                          /* tp_compare */
    0,                          /* tp_repr */
    0,                          /* tp_as_number */
    0,                          /* tp_as_sequence */
    0,                          /* tp_as_mapping */
    0,                          /* tp_hash */
    0,                          /* tp_call */
    0,                          /* tp_str */
    0,                          /* tp_getattro */
    0,                          /* tp_setattro */
    0,                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
                                /* tp_flags */
    "Semaphore/Mutex type",     /* tp_doc */
    0,                          /* tp_traverse */
    0,                          /* tp_clear */
    0,                          /* tp_richcompare */
    0,                          /* tp_weaklistoffset */
    0,                          /* tp_iter */
    0,                          /* tp_iternext */
    SemLock_methods,            /* tp_methods */
    SemLock_members,            /* tp_members */
    0,                          /* tp_getset */
    0,                          /* tp_base */
    0,                          /* tp_dict */
    0,                          /* tp_descr_get */
    0,                          /* tp_descr_set */
    0,                          /* tp_dictoffset */
    0,                          /* tp_init */
    0,                          /* tp_alloc */
    (newfunc)SemLock_new,       /* tp_new */
};
/*
* Win32 functions used by `processing` package
*
* win_functions.c
*
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "processing.h"
/* Build a METH_STATIC entry whose Python name matches the Win32 name */
#define WIN32_FUNCTION(func) \
    {#func, (PyCFunction)win32_ ## func, METH_VARARGS | METH_STATIC, ""}

/* Stash a Win32 constant on the Win32Type class dict.
 * NOTE(review): PyDict_SetItemString does not steal the reference returned
 * by Py_BuildValue, so each constant appears to leak one reference; being
 * module-lifetime values this is probably benign -- confirm. */
#define WIN32_CONSTANT(fmt, con) \
    PyDict_SetItemString(Win32Type.tp_dict, #con, Py_BuildValue(fmt, con))
/* CloseHandle(handle) -> None: close any Win32 handle */
static PyObject *
win32_CloseHandle(PyObject *self, PyObject *args)
{
    HANDLE hObject;
    BOOL success;

    if (!PyArg_ParseTuple(args, F_HANDLE, &hObject))
        return NULL;

    Py_BEGIN_ALLOW_THREADS
    success = CloseHandle(hObject);
    Py_END_ALLOW_THREADS

    if (!success)
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}

/* ConnectNamedPipe(handle, overlapped) -> None: wait for a client to
 * connect to the pipe; releases the GIL while blocked */
static PyObject *
win32_ConnectNamedPipe(PyObject *self, PyObject *args)
{
    HANDLE hNamedPipe;
    LPOVERLAPPED lpOverlapped;
    BOOL success;

    if (!PyArg_ParseTuple(args, F_HANDLE F_POINTER,
                          &hNamedPipe, &lpOverlapped))
        return NULL;

    Py_BEGIN_ALLOW_THREADS
    success = ConnectNamedPipe(hNamedPipe, lpOverlapped);
    Py_END_ALLOW_THREADS

    if (!success)
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}

/* CreateFile(...) -> handle: thin wrapper, same argument order as Win32 */
static PyObject *
win32_CreateFile(PyObject *self, PyObject *args)
{
    LPCTSTR lpFileName;
    DWORD dwDesiredAccess;
    DWORD dwShareMode;
    LPSECURITY_ATTRIBUTES lpSecurityAttributes;
    DWORD dwCreationDisposition;
    DWORD dwFlagsAndAttributes;
    HANDLE hTemplateFile;
    HANDLE handle;

    if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_POINTER
                          F_DWORD F_DWORD F_HANDLE,
                          &lpFileName, &dwDesiredAccess, &dwShareMode,
                          &lpSecurityAttributes, &dwCreationDisposition,
                          &dwFlagsAndAttributes, &hTemplateFile))
        return NULL;

    Py_BEGIN_ALLOW_THREADS
    handle = CreateFile(lpFileName, dwDesiredAccess, dwShareMode,
                        lpSecurityAttributes, dwCreationDisposition,
                        dwFlagsAndAttributes, hTemplateFile);
    Py_END_ALLOW_THREADS

    if (handle == INVALID_HANDLE_VALUE)
        return PyErr_SetFromWindowsErr(0);
    return Py_BuildValue(F_HANDLE, handle);
}

/* CreateNamedPipe(...) -> handle: thin wrapper, same argument order */
static PyObject *
win32_CreateNamedPipe(PyObject *self, PyObject *args)
{
    LPCTSTR lpName;
    DWORD dwOpenMode;
    DWORD dwPipeMode;
    DWORD nMaxInstances;
    DWORD nOutBufferSize;
    DWORD nInBufferSize;
    DWORD nDefaultTimeOut;
    LPSECURITY_ATTRIBUTES lpSecurityAttributes;
    HANDLE handle;

    if (!PyArg_ParseTuple(args, "s" F_DWORD F_DWORD F_DWORD
                          F_DWORD F_DWORD F_DWORD F_POINTER,
                          &lpName, &dwOpenMode, &dwPipeMode, &nMaxInstances,
                          &nOutBufferSize, &nInBufferSize, &nDefaultTimeOut,
                          &lpSecurityAttributes))
        return NULL;

    Py_BEGIN_ALLOW_THREADS
    handle = CreateNamedPipe(lpName, dwOpenMode, dwPipeMode, nMaxInstances,
                             nOutBufferSize, nInBufferSize, nDefaultTimeOut,
                             lpSecurityAttributes);
    Py_END_ALLOW_THREADS

    if (handle == INVALID_HANDLE_VALUE)
        return PyErr_SetFromWindowsErr(0);
    return Py_BuildValue(F_HANDLE, handle);
}
/* ExitProcess(code): terminates this process -- never returns */
static PyObject *
win32_ExitProcess(PyObject *self, PyObject *args)
{
    UINT uExitCode;

    if (!PyArg_ParseTuple(args, "I", &uExitCode))
        return NULL;

    ExitProcess(uExitCode);
    return NULL;    /* unreachable; keeps the compiler happy */
}

/* GenerateConsoleCtrlEvent(event, pgid) -> None */
static PyObject *
win32_GenerateConsoleCtrlEvent(PyObject *self, PyObject *args)
{
    DWORD dwCtrlEvent;
    DWORD dwProcessGroupId;

    if (!PyArg_ParseTuple(args, F_DWORD F_DWORD,
                          &dwCtrlEvent, &dwProcessGroupId))
        return NULL;

    if (!GenerateConsoleCtrlEvent(dwCtrlEvent, dwProcessGroupId))
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}

/* GetHandleInformation(handle) -> flags */
static PyObject *
win32_GetHandleInformation(PyObject *self, PyObject *args)
{
    HANDLE hObject;
    DWORD dwFlags;

    if (!PyArg_ParseTuple(args, F_HANDLE, &hObject))
        return NULL;

    if (!GetHandleInformation(hObject, &dwFlags))
        return PyErr_SetFromWindowsErr(0);
    return Py_BuildValue(F_DWORD, dwFlags);
}

/* GetLastError() -> code: last Win32 error of the calling thread */
static PyObject *
win32_GetLastError(PyObject *self, PyObject *args)
{
    return Py_BuildValue(F_DWORD, GetLastError());
}

/* OpenProcess(access, inherit, pid) -> handle */
static PyObject *
win32_OpenProcess(PyObject *self, PyObject *args)
{
    DWORD dwDesiredAccess;
    BOOL bInheritHandle;
    DWORD dwProcessId;
    HANDLE handle;

    if (!PyArg_ParseTuple(args, F_DWORD "i" F_DWORD,
                          &dwDesiredAccess, &bInheritHandle, &dwProcessId))
        return NULL;

    handle = OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId);
    if (handle == NULL)
        return PyErr_SetFromWindowsErr(0);
    return Py_BuildValue(F_HANDLE, handle);
}

/* SetConsoleCtrlHandler(routine_ptr, add) -> None
 * NOTE(review): the handler is passed in as a raw pointer, so only
 * C-level routines can be registered through this wrapper. */
static PyObject *
win32_SetConsoleCtrlHandler(PyObject *self, PyObject *args)
{
    PHANDLER_ROUTINE HandlerRoutine;
    BOOL Add;

    if (!PyArg_ParseTuple(args, F_POINTER "i", &HandlerRoutine, &Add))
        return NULL;

    if (!SetConsoleCtrlHandler(HandlerRoutine, Add))
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}

/* SetHandleInformation(handle, mask, flags) -> None */
static PyObject *
win32_SetHandleInformation(PyObject *self, PyObject *args)
{
    HANDLE hObject;
    DWORD dwMask;
    DWORD dwFlags;

    if (!PyArg_ParseTuple(args, F_HANDLE F_DWORD F_DWORD,
                          &hObject, &dwMask, &dwFlags))
        return NULL;

    if (!SetHandleInformation(hObject, dwMask, dwFlags))
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}
/* SetNamedPipeHandleState(handle, mode, maxcollect, timeout) -> None
 * Each of the three trailing arguments may be None, meaning "leave that
 * setting unchanged" (a NULL pointer is passed to the Win32 call). */
static PyObject *
win32_SetNamedPipeHandleState(PyObject *self, PyObject *args)
{
    HANDLE hNamedPipe;
    PyObject *oArgs[3];
    DWORD dwArgs[3], *pArgs[3] = {NULL, NULL, NULL};
    int i;

    if (!PyArg_ParseTuple(args, F_HANDLE "OOO",
                          &hNamedPipe, &oArgs[0], &oArgs[1], &oArgs[2]))
        return NULL;

    PyErr_Clear();    /* NOTE(review): appears redundant here -- confirm */

    for (i = 0 ; i < 3 ; i++) {
        if (oArgs[i] != Py_None) {
            dwArgs[i] = PyInt_AsUnsignedLongMask(oArgs[i]);
            if (PyErr_Occurred())
                return NULL;
            pArgs[i] = &dwArgs[i];
        }
    }

    if (!SetNamedPipeHandleState(hNamedPipe, pArgs[0], pArgs[1], pArgs[2]))
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}

/* TerminateProcess(handle, exitcode) -> None */
static PyObject *
win32_TerminateProcess(PyObject *self, PyObject *args)
{
    HANDLE hProcess;
    UINT uExitCode;

    if (!PyArg_ParseTuple(args, F_HANDLE "I", &hProcess, &uExitCode))
        return NULL;

    if (!TerminateProcess(hProcess, uExitCode))
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}

/* WaitNamedPipe(name, timeout_ms) -> None: block (GIL released) until a
 * pipe instance is available or the timeout elapses */
static PyObject *
win32_WaitNamedPipe(PyObject *self, PyObject *args)
{
    LPCTSTR lpNamedPipeName;
    DWORD nTimeOut;
    BOOL success;

    if (!PyArg_ParseTuple(args, "s" F_DWORD, &lpNamedPipeName, &nTimeOut))
        return NULL;

    Py_BEGIN_ALLOW_THREADS
    success = WaitNamedPipe(lpNamedPipeName, nTimeOut);
    Py_END_ALLOW_THREADS

    if (!success)
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}
/* Static-method table for the _processing.win32 namespace class */
static PyMethodDef win32_methods[] = {
    WIN32_FUNCTION(CloseHandle),
    WIN32_FUNCTION(ConnectNamedPipe),
    WIN32_FUNCTION(CreateFile),
    WIN32_FUNCTION(CreateNamedPipe),
    WIN32_FUNCTION(ExitProcess),
    WIN32_FUNCTION(GenerateConsoleCtrlEvent),
    WIN32_FUNCTION(GetHandleInformation),
    WIN32_FUNCTION(GetLastError),
    WIN32_FUNCTION(OpenProcess),
    WIN32_FUNCTION(SetConsoleCtrlHandler),
    WIN32_FUNCTION(SetHandleInformation),
    WIN32_FUNCTION(SetNamedPipeHandleState),
    WIN32_FUNCTION(TerminateProcess),
    WIN32_FUNCTION(WaitNamedPipe),
    {NULL}
};

/* Type used purely as a namespace; the remaining slots are filled in by
   create_win32_namespace() before PyType_Ready() */
PyTypeObject Win32Type = {
    PyObject_HEAD_INIT(NULL)
};
/* Finish initializing Win32Type (name, methods, constants) and return it
 * as the object installed as `_processing.win32`.  Returns a new
 * reference, or NULL if PyType_Ready() fails.
 * NOTE(review): WIN32_CONSTANT return values are not checked, so a failed
 * dict insert would pass silently -- confirm acceptable at init time. */
PyObject *
create_win32_namespace(void)
{
    Win32Type.tp_name = "_processing.win32";
    Win32Type.tp_methods = win32_methods;
    if (PyType_Ready(&Win32Type) < 0)
        return NULL;
    Py_INCREF(&Win32Type);

    /* expose the Win32 constants the Python side needs */
    WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS);
    WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);
    WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED);
    WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);
    WIN32_CONSTANT(F_DWORD, GENERIC_READ);
    WIN32_CONSTANT(F_DWORD, GENERIC_WRITE);
    WIN32_CONSTANT(F_DWORD, HANDLE_FLAG_INHERIT);
    WIN32_CONSTANT(F_DWORD, NMPWAIT_WAIT_FOREVER);
    WIN32_CONSTANT(F_DWORD, OPEN_EXISTING);
    WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_DUPLEX);
    WIN32_CONSTANT(F_DWORD, PIPE_ACCESS_INBOUND);
    WIN32_CONSTANT(F_DWORD, PIPE_READMODE_MESSAGE);
    WIN32_CONSTANT(F_DWORD, PIPE_TYPE_MESSAGE);
    WIN32_CONSTANT(F_DWORD, PIPE_UNLIMITED_INSTANCES);
    WIN32_CONSTANT(F_DWORD, PIPE_WAIT);
    WIN32_CONSTANT(F_DWORD, PROCESS_ALL_ACCESS);
    WIN32_CONSTANT(F_POINTER, NULL);

    return (PyObject*)&Win32Type;
}
#
# Unit tests for processing package
#
import unittest
import threading
import Queue
import time
import sys
import os
import signal
import array
import copy
import socket
import random
import ctypes
import processing.dummy
import processing.connection
import processing.managers
import processing.heap
import processing.managers
import processing.pool
#
# Constants
#
# Short pause used whenever a test must give another process/thread a
# moment to make progress.
DELTA = 0.1
CHECK_TIMINGS = False     # making true makes tests take a lot longer
                          # and can sometimes cause some non-serious
                          # failures because some calls block a bit
                          # longer than expected
# Timeouts handed to blocking calls; only meaningful values when the
# timing checks are enabled.
if CHECK_TIMINGS:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
    """Callable proxy that records how long each call to *func* takes.

    The duration of the most recent invocation (in seconds) is stored on
    the ``elapsed`` attribute; it is ``None`` until the first call.
    """

    def __init__(self, func):
        self.func = func
        self.elapsed = None   # seconds taken by the most recent call

    def __call__(self, *args, **kwds):
        started = time.time()
        try:
            return self.func(*args, **kwds)
        finally:
            # recorded even if func raised, so timing asserts still work
            self.elapsed = time.time() - started
#
# Base class for test cases
#
class BaseTestCase(object):
    """Mixin with helpers shared by all the concrete test-case flavours."""

    ALLOWED_TYPES = ('processes', 'manager', 'threads')

    def assertTimingAlmostEqual(self, a, b):
        # Timing comparisons are only meaningful when CHECK_TIMINGS is on;
        # otherwise the call is a no-op.
        if not CHECK_TIMINGS:
            return
        self.assertAlmostEqual(a, b, 1)
#
# Monkey patch threading._Semaphore and proxy type to have getValue()
#
def getValue(self):
    # Reach into threading._Semaphore's name-mangled private counter
    # (Python 2 spelling) so tests can inspect a semaphore's value.
    return self._Semaphore__value
threading._Semaphore.getValue = getValue

def getValue(self):
    # Proxy flavour: forward the call to the referent living in the
    # manager process.
    return self._callmethod('getValue')
processing.managers.AcquirerProxy.getValue = getValue
class SyncManager(processing.managers.SyncManager):
    """Trivial SyncManager subclass defined in this module.

    We create a trivial manager subclass.  This ensures that this
    module will be imported in the manager process, so monkey
    patching of threading.Semaphore will affect the manager process.
    """
    pass
#
# Testcases
#
class _TestProcess(BaseTestCase):
    """Tests of the Process API (run against both processes and threads)."""

    ALLOWED_TYPES = ('processes', 'threads')

    def test_current(self):
        # The current process/thread is alive, non-daemonic, and (for real
        # processes) has a non-empty auth key and a valid pid.
        current = self.currentProcess()
        self.assertTrue(current.isAlive())
        self.assertTrue(not current.isDaemon())
        if self.TYPE != 'threads':
            authkey = current.getAuthKey()
            self.assertTrue(type(authkey) is str)
            self.assertTrue(len(authkey) > 0)
            self.assertTrue(current.getExitCode() is None)
            self.assertEqual(current.getPid(), os.getpid())

    def _test(self, q, *args, **kwds):
        # Runs in the child: report back args/kwargs and identity info.
        current = self.currentProcess()
        q.put(args)
        q.put(kwds)
        q.put(current.getName())
        if self.TYPE != 'threads':
            q.put(current.getAuthKey())
            q.put(current.getPid())

    def test_process(self):
        # Full lifecycle: construct, start, observe while running, join,
        # then check the final state.
        q = self.Queue(1)
        e = self.Event()
        args = (q, 1, 2)
        kwargs = {'hello':23, 'bye':2.54}
        name = 'SomeProcess'
        p = self.Process(
            target=self._test, args=args, kwargs=kwargs, name=name
            )
        p.setDaemon(True)
        current = self.currentProcess()

        if self.TYPE != 'threads':
            # children inherit the parent's auth key
            self.assertEquals(p.getAuthKey(), current.getAuthKey())
        self.assertEquals(p.getExitCode(), None)
        self.assertEquals(p.isAlive(), False)
        self.assertEquals(p.isDaemon(), True)
        self.assertTrue(p not in self.activeChildren())
        self.assertTrue(type(self.activeChildren()) is list)

        p.start()

        self.assertEquals(p.getExitCode(), None)
        self.assertEquals(p.isAlive(), True)
        self.assertTrue(p in self.activeChildren())

        # _test puts args[1:] because args[0] is the queue itself
        self.assertEquals(q.get(), args[1:])
        self.assertEquals(q.get(), kwargs)
        self.assertEquals(q.get(), p.getName())
        if self.TYPE != 'threads':
            self.assertEquals(q.get(), current.getAuthKey())
            self.assertEquals(q.get(), p.getPid())

        p.join()

        self.assertEquals(p.getExitCode(), 0)
        self.assertEquals(p.isAlive(), False)
        self.assertTrue(p not in self.activeChildren())

    def _test_terminate(self):
        # Child just sleeps until killed.
        time.sleep(1000)

    def test_terminate(self):
        # terminate() only exists for real processes, not threads.
        if self.TYPE == 'threads':
            return

        p = self.Process(target=self._test_terminate)
        p.setDaemon(True)
        p.start()

        self.assertEqual(p.isAlive(), True)
        self.assertTrue(p in self.activeChildren())
        self.assertEqual(p.getExitCode(), None)

        p.terminate()

        # join should return (almost) immediately after terminate()
        join = TimingWrapper(p.join)
        self.assertEqual(join(), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)

        self.assertEqual(p.isAlive(), False)
        self.assertTrue(p not in self.activeChildren())

        p.join()

        # XXX sometimes get p.getExitCode() == 0 on Windows ...
        #self.assertEqual(p.getExitCode(), -signal.SIGTERM)

    def test_cpuCount(self):
        try:
            cpus = processing.cpuCount()
        except NotImplementedError:
            cpus = 1
        self.assertTrue(type(cpus) is int)
        self.assertTrue(cpus >= 1)

    def test_activeChildren(self):
        # A child appears in activeChildren() only while it is running.
        self.assertEqual(type(self.activeChildren()), list)

        p = self.Process(target=time.sleep, args=(DELTA,))
        self.assertTrue(p not in self.activeChildren())

        p.start()
        self.assertTrue(p in self.activeChildren())

        p.join()
        self.assertTrue(p not in self.activeChildren())

    def _test_recursion(self, wconn, id):
        # Each invocation reports its path id, then (down to depth 2)
        # spawns two children with extended ids.
        wconn.send(id)
        if len(id) < 2:
            for i in range(2):
                p = self.Process(
                    target=self._test_recursion, args=(wconn, id+[i])
                    )
                p.start()
                p.join()

    def test_recursion(self):
        # Processes must be able to create processes of their own; the
        # ids arrive in depth-first order because each spawn is joined.
        rconn, wconn = self.Pipe(duplex=False)
        self._test_recursion(wconn, [])

        time.sleep(DELTA)
        result = []
        while rconn.poll():
            result.append(rconn.recv())

        expected = [
            [],
            [0],
            [0, 0],
            [0, 1],
            [1],
            [1, 0],
            [1, 1]
            ]
        self.assertEqual(result, expected)
#
#
#
class _UpperCaser(processing.Process):
    """Worker process that upper-cases strings sent to it over a pipe.

    The child end of the pipe is served by run(); the parent talks to it
    via submit()/stop().  Sending None tells the child to shut down.
    """

    def __init__(self):
        processing.Process.__init__(self)
        # child_conn is used inside run() (the child); parent_conn by the
        # parent via submit()/stop()
        self.child_conn, self.parent_conn = processing.Pipe()

    def run(self):
        # In the child: drop the parent's end, then echo each received
        # string upper-cased until a None sentinel arrives.
        self.parent_conn.close()
        for s in iter(self.child_conn.recv, None):
            self.child_conn.send(s.upper())
        self.child_conn.close()

    def submit(self, s):
        # Round-trip one string through the worker.
        assert type(s) is str
        self.parent_conn.send(s)
        return self.parent_conn.recv()

    def stop(self):
        # Send the shutdown sentinel and release both pipe ends.
        self.parent_conn.send(None)
        self.parent_conn.close()
        self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
    """Check that subclassing processing.Process with a custom run() works."""

    ALLOWED_TYPES = ('processes',)

    def test_subclassing(self):
        uppercaser = _UpperCaser()
        uppercaser.start()
        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
        self.assertEqual(uppercaser.submit('world'), 'WORLD')
        uppercaser.stop()
        uppercaser.join()
#
#
#
class _TestQueue(BaseTestCase):
    def _test_put(self, queue, child_can_start, parent_can_continue):
        # Child side of test_put: once signalled, drain the six items the
        # parent queued, then let the parent proceed.
        child_can_start.wait()
        for i in range(6):
            queue.get()
        parent_can_continue.set()
    def test_put(self):
        # Fill a bounded queue via every put() calling convention, verify
        # that further puts fail (immediately or after the timeout), then
        # let the child drain it and check it reads empty again.
        queue = self.Queue(maxsize=6)
        child_can_start = self.Event()
        parent_can_continue = self.Event()

        proc = self.Process(
            target=self._test_put,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.setDaemon(True)
        proc.start()

        self.assertEqual(queue.empty(), True)
        self.assertEqual(queue.full(), False)

        # six puts exercising positional block/timeout combinations
        queue.put(1)
        queue.put(2, True)
        queue.put(3, True, None)
        queue.put(4, False)
        queue.put(5, False, None)
        queue.put_nowait(6)

        # the values may be in buffer but not yet in pipe so sleep a bit
        time.sleep(DELTA)

        self.assertEqual(queue.empty(), False)
        self.assertEqual(queue.full(), True)

        put = TimingWrapper(queue.put)
        put_nowait = TimingWrapper(queue.put_nowait)

        # non-blocking puts on a full queue fail immediately...
        self.assertRaises(Queue.Full, put, 7, False)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(Queue.Full, put, 7, False, None)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(Queue.Full, put_nowait, 7)
        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)

        # ...blocking puts fail only after their timeout has elapsed
        self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)

        self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)

        child_can_start.set()
        parent_can_continue.wait()

        self.assertEqual(queue.empty(), True)
        self.assertEqual(queue.full(), False)

        proc.join()
    def _test_get(self, queue, child_can_start, parent_can_continue):
        # Child side of test_get: feed 1..5 into the queue (using the
        # batch putmany() API where available) and signal the parent.
        child_can_start.wait()
        queue.put(1)
        queue.put(2)
        if self.TYPE == 'processes':
            queue.putmany([3, 4, 5])
        else:
            # dummy/thread queues have no putmany()
            queue.put(3)
            queue.put(4)
            queue.put(5)
        parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.setDaemon(True)
proc.start()
self.assertEqual(queue.empty(), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue.empty(), False)
self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue.empty(), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
def _test_fork(self, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
#
#
#
class _TestLock(BaseTestCase):
    """Checks for the basic Lock and RLock synchronisation primitives."""
    def test_lock(self):
        # A plain lock can be taken once; a second non-blocking attempt
        # fails, and releasing an unheld lock raises.
        mutex = self.Lock()
        self.assertEqual(mutex.acquire(), True)
        self.assertEqual(mutex.acquire(False), False)
        self.assertEqual(mutex.release(), None)
        self.assertRaises((ValueError, threading.ThreadError), mutex.release)
    def test_rlock(self):
        # A recursive lock can be taken repeatedly and must be released
        # the same number of times; one release too many raises.
        mutex = self.RLock()
        for _ in range(3):
            self.assertEqual(mutex.acquire(), True)
        for _ in range(3):
            self.assertEqual(mutex.release(), None)
        self.assertRaises(AssertionError, mutex.release)
class _TestSemaphore(BaseTestCase):
    """Tests for Semaphore and BoundedSemaphore counters."""
    def _test_semaphore(self, sem):
        # Take the counter from 2 down to 0 and back up, checking the
        # value after each step and that non-blocking acquire fails
        # once the counter is exhausted.
        self.assertEqual(sem.getValue(), 2)
        self.assertEqual(sem.acquire(), True)
        self.assertEqual(sem.getValue(), 1)
        self.assertEqual(sem.acquire(), True)
        self.assertEqual(sem.getValue(), 0)
        self.assertEqual(sem.acquire(False), False)
        self.assertEqual(sem.getValue(), 0)
        self.assertEqual(sem.release(), None)
        self.assertEqual(sem.getValue(), 1)
        self.assertEqual(sem.release(), None)
        self.assertEqual(sem.getValue(), 2)
    def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        # a plain semaphore may be released beyond its initial value
        self.assertEqual(sem.release(), None)
        self.assertEqual(sem.getValue(), 3)
        self.assertEqual(sem.release(), None)
        self.assertEqual(sem.getValue(), 4)
    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # a bounded semaphore must refuse to exceed its initial value
        self.assertRaises(ValueError, sem.release)
        self.assertEqual(sem.getValue(), 2)
    def test_timeout(self):
        # timeout behaviour is only checked for process semaphores
        if self.TYPE != 'processes':
            return
        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)
        # non-blocking acquires return at once, whatever the timeout
        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)
        # blocking acquires give up after roughly the requested timeout
        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
    def test_getvalue_bound(self):
        sem = self.BoundedSemaphore(4)
        self.assertEqual(sem.getValue(), 4)
class _TestCondition(BaseTestCase):
    """Tests for condition variables shared between processes and
    threads (notify, notifyAll and wait timeouts)."""
    def f(self, cond, sleeping, woken, timeout=None):
        # Runs in a child: announce that we are about to sleep on the
        # condition, wait (possibly with a timeout), then announce
        # having woken.
        cond.acquire()
        sleeping.release()
        cond.wait(timeout)
        woken.release()
        cond.release()
    def check_invariant(self, cond):
        # this is only supposed to succeed when there are no sleepers
        if self.TYPE == 'processes':
            sleepers = (cond._sleeping_count.getValue() -
                        cond._woken_count.getValue())
            self.assertEqual(sleepers, 0)
            self.assertEqual(cond._wait_semaphore.getValue(), 0)
    def test_notify(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)
        # one process and one thread both sleep on the same condition
        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.setDaemon(True)
        p.start()
        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.setDaemon(True)
        p.start()
        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()
        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertEqual(woken.getValue(), 0)
        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()
        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertEqual(woken.getValue(), 1)
        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()
        # check other has woken up
        time.sleep(DELTA)
        self.assertEqual(woken.getValue(), 2)
        # check state is not mucked up
        self.check_invariant(cond)
        p.join()
    def test_notifyAll(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)
        # start some threads/processes which will timeout
        for i in range(3):
            p = self.Process(target=self.f,
                             args=(cond, sleeping, woken, TIMEOUT1))
            p.setDaemon(True)
            p.start()
            t = threading.Thread(target=self.f,
                                 args=(cond, sleeping, woken, TIMEOUT1))
            t.setDaemon(True)
            t.start()
        # wait for them all to sleep
        for i in xrange(6):
            sleeping.acquire()
        # check they have all timed out
        for i in xrange(6):
            woken.acquire()
        self.assertEqual(woken.getValue(), 0)
        # check state is not mucked up
        self.check_invariant(cond)
        # start some more threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.setDaemon(True)
            p.start()
            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.setDaemon(True)
            t.start()
        # wait for them to all sleep
        for i in xrange(6):
            sleeping.acquire()
        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertEqual(woken.getValue(), 0)
        # wake them all up
        cond.acquire()
        cond.notifyAll()
        cond.release()
        # check they have all woken
        time.sleep(DELTA)
        self.assertEqual(woken.getValue(), 6)
        # check state is not mucked up
        self.check_invariant(cond)
    def test_timeout(self):
        # wait() must return after roughly TIMEOUT1 if nobody notifies
        cond = self.Condition()
        wait = TimingWrapper(cond.wait)
        cond.acquire()
        res = wait(TIMEOUT1)
        cond.release()
        self.assertEqual(res, None)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
    """Tests for the shared Event type."""
    def _test_event(self, event):
        # Runs in a child: set the event after a short delay.
        time.sleep(TIMEOUT2)
        event.set()
    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)
        # event starts cleared, so waits time out
        self.assertEqual(event.isSet(), False)
        self.assertEqual(wait(0.0), None)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), None)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
        # once set, waits return immediately
        event.set()
        self.assertEqual(event.isSet(), True)
        self.assertEqual(wait(), None)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), None)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(event.isSet(), True)
        event.clear()
        self.assertEqual(event.isSet(), False)
        # a child sets the event after TIMEOUT2, unblocking this wait()
        self.Process(target=self._test_event, args=(event,)).start()
        self.assertEqual(wait(), None)
#
#
#
class _TestValue(BaseTestCase):
    """Tests for shared single-value objects created by self.Value()."""
    # (typecode, initial value, value the child writes)
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('c', 'x', 'y'),
        ]
    def _test(self, values):
        # Runs in the child: overwrite each shared value with the third
        # element of the corresponding triple.
        for sv, cv in zip(values, self.codes_values):
            sv.value = cv[2]
    def test_sharedvalue(self, lock=False):
        # Create shared values, let a child process rewrite them, then
        # check the writes are visible in the parent.
        #
        # Bug fix: `lock` used to be accepted and silently ignored, so
        # test_synchronized() never actually exercised synchronized
        # values.  It is now forwarded to self.Value() when requested
        # (self.Value() demonstrably accepts a `lock` keyword -- see
        # test_getobj_getlock below); the default path is unchanged.
        if lock:
            values = [self.Value(code, value, lock=lock)
                      for code, value, _ in self.codes_values]
        else:
            values = [self.Value(code, value)
                      for code, value, _ in self.codes_values]
        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[1])
        proc = self.Process(target=self._test, args=(values,))
        proc.start()
        proc.join()
        # the child's writes must be visible here
        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[2])
    def test_synchronized(self):
        self.test_sharedvalue(lock=True)
    def test_getobj_getlock(self):
        # getobj()/getlock() only exist on process-shared values
        if self.TYPE != 'processes':
            return
        lock = self.Lock()
        obj = self.Value('i', 5, lock=lock)
        self.assertEqual(obj.getlock(), lock)
        self.assertEqual(obj.getobj().value, 5)
class _TestArray(BaseTestCase):
    """Tests for shared arrays created by self.Array()."""
    def f(self, seq):
        # In-place prefix sum; applied both locally and in a child.
        for i in range(1, len(seq)):
            seq[i] += seq[i-1]
    def test_sharedarray(self, lock=True):
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        arr = self.Array('i', seq, lock)
        # length, indexing and slicing mirror the plain list
        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))
        # slice assignment accepts any suitable sequence
        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
        self.assertEqual(list(arr), seq)
        # apply the same mutation locally and in a child, then compare:
        # the child's writes to the shared array must be visible here
        self.f(seq)
        p = self.Process(target=self.f, args=(arr,))
        p.start()
        p.join()
        self.assertEqual(list(arr), list(seq))
    def test_synchronized(self):
        self.test_sharedarray(lock=True)
#
#
#
class _TestContainers(BaseTestCase):
    """Tests for the proxied list/dict/Namespace container types;
    these only exist for manager-based sharing."""
    ALLOWED_TYPES = ('manager',)
    def test_list(self):
        a = self.list(range(10))
        self.assertEqual(a[:], range(10))
        b = self.list()
        self.assertEqual(b[:], [])
        b.extend(range(5))
        self.assertEqual(b[:], range(5))
        # indexing, slicing, in-place repetition and concatenation
        self.assertEqual(b[2], 2)
        self.assertEqual(b[2:10], [2,3,4])
        b *= 2
        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
        # comparisons work against both proxies and plain lists
        self.assertEqual(a, self.list(range(10)))
        self.assertEqual(a, range(10))
        self.assertTrue(a != range(11))
        self.assertTrue(range(9) < a)
        self.assertTrue(a < range(11))
        # proxied lists may be nested inside other proxied lists
        d = [a, b]
        e = self.list(d)
        self.assertEqual(
            e, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
            )
        it = iter(a)
        self.assertEqual(tuple(it), (0, 1, 2, 3, 4, 5, 6, 7, 8, 9))
        # mutating `a` afterwards is visible through the outer proxy
        f = self.list([a])
        a.append('hello')
        self.assertEqual(f, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
    def test_dict(self):
        d = self.dict()
        for i in range(5):
            d[i] = chr(65 + i)
        self.assertEqual(
            d, {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E'}
            )
        # iteration and iteritems() behave like a plain dict
        self.assertEqual(list(d), range(5))
        self.assertEqual(
            list(d.iteritems()),
            [(0, 'A'), (1, 'B'), (2, 'C'), (3, 'D'), (4, 'E')]
            )
    def test_namespace(self):
        # attributes can be set, read, deleted and are shown by str()
        n = self.Namespace()
        n.name = 'Bob'
        n.job = 'Builder'
        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
        del n.job
        self.assertEqual(str(n), "Namespace(name='Bob')")
        self.assertTrue(hasattr(n, 'name'))
        self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
    """Return ``x`` squared after pausing for ``wait`` seconds.

    Used as the worker function for the Pool tests."""
    time.sleep(wait)
    return x ** 2
class _TestPool(BaseTestCase):
    """Tests for processing.Pool; only meaningful for real processes."""
    ALLOWED_TYPES = ('processes',)
    def test_apply(self):
        # pool.apply must agree with a direct (synchronous) call
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), apply(sqr, (5,)))
        self.assertEqual(papply(sqr, (), {'x':3}), apply(sqr, (), {'x':3}))
    def test_map(self):
        # pool.map must agree with builtin map, with or without chunking
        pmap = self.pool.map
        self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
        self.assertEqual(pmap(sqr, range(100), chunksize=20),
                         map(sqr, range(100)))
    def test_async(self):
        # the result only becomes ready after the worker's sleep
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
    def test_async_timeout(self):
        # get() must raise TimeoutError after roughly its timeout
        res = self.pool.apply_async(sqr, (6, 100))
        get = TimingWrapper(res.get)
        self.assertRaises(processing.TimeoutError, get, timeout=TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
        # One of the workers will be occupied a long time (probably
        # till the pool gets terminated), but there are other workers
        # so who cares.
    def test_imap(self):
        # imap preserves order and supports both list() and next()
        it = self.pool.imap(sqr, range(10))
        self.assertEqual(list(it), map(sqr, range(10)))
        it = self.pool.imap(sqr, range(10))
        for i in range(10):
            self.assertEqual(it.next(), i*i)
        self.assertRaises(StopIteration, it.next)
        it = self.pool.imap(sqr, range(1000), chunksize=100)
        for i in range(1000):
            self.assertEqual(it.next(), i*i)
        self.assertRaises(StopIteration, it.next)
    def test_imap_unordered(self):
        # order is unspecified, so compare sorted results
        it = self.pool.imap_unordered(sqr, range(1000))
        self.assertEqual(sorted(it), map(sqr, range(1000)))
        it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
        self.assertEqual(sorted(it), map(sqr, range(1000)))
    def test_make_pool(self):
        # Pool(3) spawns exactly three worker processes
        p = processing.Pool(3)
        self.assertEqual(3, len(p._pool))
#
#
#
class _TestZZZDebugInfo(BaseTestCase):
    """Leak check for the manager tests; the 'ZZZ' in the name makes
    it sort (and therefore run) after all the other test classes."""
    ALLOWED_TYPES = ('manager',)
    def test_debug_info(self):
        # this gets run after all the other tests for the manager
        # and it tests that there have been no reference leaks for
        # the managers shared objects
        debug = self._debug_info()
        if debug:
            print debug
        self.assertTrue(not debug)
#
#
#
from processing.managers import (
BaseManager, BaseProxy, CreatorMethod, RemoteError
)
class FooBar(object):
    """Helper class exposed through MyManager as a proxied object."""
    def f(self):
        return 'f()'
    def g(self):
        # used to check that exceptions propagate back through a proxy
        raise ValueError
    def _h(self):
        # leading underscore: only exposed when requested explicitly
        return '_h()'
def baz():
    """Generator yielding the squares 0, 1, 4, ..., 81; proxied via
    IteratorProxy in the manager tests."""
    n = 0
    while n < 10:
        yield n * n
        n += 1
class IteratorProxy(BaseProxy):
    """Proxy type forwarding the iterator protocol to the referent."""
    def __iter__(self):
        return self
    def next(self):
        # delegate to the referent's next() in the manager process
        return self._callmethod('next')
class MyManager(BaseManager):
    """Manager exposing FooBar and the baz generator for the tests."""
    # Foo uses the default exposure rules; Bar explicitly exposes '_h'
    Foo = CreatorMethod(FooBar)
    Bar = CreatorMethod(FooBar, exposed=('f', '_h'))
    baz = CreatorMethod(baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
    """Tests for a custom BaseManager subclass and method exposure."""
    ALLOWED_TYPES = ('manager',)
    def test_mymanager(self):
        manager = MyManager()
        manager.start()
        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()
        # Foo exposes 'f' and 'g' (no underscore names); Bar was
        # created with an explicit exposed=('f', '_h') list
        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])
        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        # calling an unexposed method via _callmethod fails remotely
        self.assertEqual(foo._callmethod('f'), 'f()')
        self.assertRaises(RemoteError, foo._callmethod, '_h')
        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')
        # IteratorProxy makes the remote generator iterable here
        self.assertEqual(list(baz), [i*i for i in range(10)])
        manager.shutdown()
#
#
#
class _TestConnection(BaseTestCase):
    """Tests for the two ends of a Pipe() connection."""
    ALLOWED_TYPES = ('processes', 'threads')
    def _echo(self, conn):
        # Runs in a child: echo raw byte messages until an empty
        # string arrives, then close the connection.
        for msg in iter(conn.recvbytes, ''):
            conn.sendbytes(msg)
        conn.close()
    def test_connection(self):
        conn, child_conn = self.Pipe()
        p = self.Process(target=self._echo, args=(child_conn,))
        p.setDaemon(True)
        p.start()
        seq = [1, 2.25, None]
        msg = 'hello world'
        longmsg = msg * 10
        arr = array.array('i', range(4))
        if self.TYPE == 'processes':
            self.assertEqual(type(conn.fileno()), int)
        # send()/recv() round-trip picklable objects
        self.assertEqual(conn.send(seq), None)
        self.assertEqual(conn.recv(), seq)
        # sendbytes()/recvbytes() round-trip raw strings
        self.assertEqual(conn.sendbytes(msg), None)
        self.assertEqual(conn.recvbytes(), msg)
        if self.TYPE == 'processes':
            # recvbytes_into() fills a pre-allocated buffer and returns
            # the number of bytes received
            buffer = array.array('i', [0]*10)
            expected = list(arr) + [0] * (10 - len(arr))
            self.assertEqual(conn.sendbytes(arr), None)
            self.assertEqual(conn.recvbytes_into(buffer),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)
            # an offset (in bytes) may be given
            buffer = array.array('i', [0]*10)
            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
            self.assertEqual(conn.sendbytes(arr), None)
            self.assertEqual(conn.recvbytes_into(buffer, 3 * buffer.itemsize),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)
            # a too-small buffer raises BufferTooShort carrying the data
            buffer = array.array('c', ' ' * 40)
            self.assertEqual(conn.sendbytes(longmsg), None)
            try:
                res = conn.recvbytes_into(buffer)
            except processing.BufferTooShort, e:
                self.assertEqual(e.args, (longmsg,))
            else:
                self.fail('expected BufferTooShort, got %s' % res)
        # poll() reports readability, honouring its timeout
        poll = TimingWrapper(conn.poll)
        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
        conn.send(None)
        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(conn.recv(), None)
        # a large message survives the round trip intact
        really_big_msg = 'X' * (1024 * 1024 * 16)   # 16 megabytes
        conn.sendbytes(really_big_msg)
        self.assertEqual(conn.recvbytes(), really_big_msg)
        conn.sendbytes('')     # tell child to quit
        child_conn.close()
        if self.TYPE == 'processes':
            # once the child's end is closed, reads raise EOFError
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recvbytes)
        p.join()
    def test_duplex_false(self):
        # one-way pipe: reader cannot send, writer cannot receive
        reader, writer = self.Pipe(duplex=False)
        self.assertEqual(writer.send(1), None)
        self.assertEqual(reader.recv(), 1)
        if self.TYPE == 'processes':
            self.assertRaises(IOError, writer.recv)
            self.assertRaises(IOError, reader.send, 2)
    def test_spawn_close(self):
        # We test that a pipe connection can be closed by parent
        # process immediately after child is spawned.  On Windows this
        # would have sometimes failed on old versions because
        # child_conn would be closed before the child got a chance to
        # duplicate it.
        conn, child_conn = self.Pipe()
        p = self.Process(target=self._echo, args=(child_conn,))
        p.start()
        child_conn.close()    # this might complete before child initializes
        conn.sendbytes('hello')
        self.assertEqual(conn.recvbytes(), 'hello')
        conn.sendbytes('')
        conn.close()
        p.join()
class _TestListenerClient(BaseTestCase):
    """Tests for address-based Listener/Client connections."""
    ALLOWED_TYPES = ('processes', 'threads')
    def _test(self, address):
        # Runs in a child: connect to the listener and send a greeting.
        conn = self.connection.Client(address)
        conn.send('hello')
        conn.close()
    def test_listener_client(self):
        # try every address family the connection module supports
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.setDaemon(True)
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()
class _TestPicklingConnections(BaseTestCase):
    """Tests that connection objects themselves can be sent over a
    connection, when the platform supports pickling them."""
    ALLOWED_TYPES = ('processes', 'threads')
    def _listener(self, conn, families):
        # Runs in a child: for each family, create a listener, report
        # its address over `conn`, accept one connection and send the
        # accepted connection object back (pickled).
        for fam in families:
            l = self.connection.Listener(family=fam)
            conn.send(l.address)
            new_conn = l.accept()
            conn.send(new_conn)
        if self.TYPE == 'processes':
            # also exercise pickling of a raw socket object
            l = socket.socket()
            l.bind(('localhost', 0))
            conn.send(l.getsockname())
            l.listen(1)
            new_conn, addr = l.accept()
            conn.send(new_conn)
        conn.recv()
    def _remote(self, conn):
        # Runs in another child: connect to each received address and
        # send back the upper-cased message; None terminates the loop.
        for (address, msg) in iter(conn.recv, None):
            client = self.connection.Client(address)
            client.send(msg.upper())
            client.close()
        if self.TYPE == 'processes':
            address, msg = conn.recv()
            client = socket.socket()
            client.connect(address)
            client.sendall(msg.upper())
            client.close()
        conn.close()
    def test_pickling(self):
        if not self.connection.connections_are_picklable:
            return
        families = self.connection.families
        # child 1 creates listeners and sends accepted connections back
        lconn, lconn0 = self.Pipe()
        lp = self.Process(target=self._listener, args=(lconn0, families))
        lp.start()
        lconn0.close()
        # child 2 connects to the addresses child 1 reports
        rconn, rconn0 = self.Pipe()
        rp = self.Process(target=self._remote, args=(rconn0,))
        rp.start()
        rconn0.close()
        for fam in families:
            msg = 'This connection uses family %s' % fam
            address = lconn.recv()
            rconn.send((address, msg))
            # the unpickled connection must be usable in this process
            new_conn = lconn.recv()
            self.assertEqual(new_conn.recv(), msg.upper())
        rconn.send(None)
        if self.TYPE == 'processes':
            msg = 'This connection uses a normal socket'
            address = lconn.recv()
            rconn.send((address, msg))
            new_conn = lconn.recv()
            self.assertEqual(new_conn.recv(100), msg.upper())
        lconn.send(None)
        rconn.close()
        lconn.close()
        lp.join()
        rp.join()
#
#
#
class _TestHeap(BaseTestCase):
    """Stress test for the shared memory heap allocator."""
    ALLOWED_TYPES = ('processes',)
    def test_heap(self):
        iterations = 5000
        maxblocks = 50
        blocks = []
        # create and destroy lots of blocks of different sizes
        for i in xrange(iterations):
            size = int(random.lognormvariate(0, 1) * 1000)
            b = processing.heap.BufferWrapper(size)
            blocks.append(b)
            if len(blocks) > maxblocks:
                i = random.randrange(maxblocks)
                del blocks[i]
        # get the heap object
        heap = processing.heap.BufferWrapper._heap
        # verify the state of the heap: collect every free and occupied
        # block as (arena-index, start, stop, length, state)
        all = []
        occupied = 0
        for L in heap._len_to_seq.values():
            for arena, start, stop in L:
                all.append((heap._arenas.index(arena), start, stop,
                            stop-start, 'free'))
        for arena, start, stop in heap._allocated_blocks:
            all.append((heap._arenas.index(arena), start, stop,
                        stop-start, 'occupied'))
            occupied += (stop-start)
        all.sort()
        # consecutive blocks must tile the arenas exactly: each block
        # either starts where the previous one stopped or opens a new
        # arena at offset 0
        for i in range(len(all)-1):
            (arena, start, stop) = all[i][:3]
            (narena, nstart, nstop) = all[i+1][:3]
            self.assertTrue((arena != narena and nstart == 0) or
                            (stop == nstart))
#
#
#
class _Foo(ctypes.Structure):
    """Simple C struct used to test sharing of ctypes objects."""
    _fields_ = [
        ('x', ctypes.c_int),
        ('y', ctypes.c_double)
        ]
class _TestSharedCTypes(BaseTestCase):
    """Tests for ctypes objects allocated from shared memory."""
    ALLOWED_TYPES = ('processes',)
    def _double(self, x, y, foo, arr, string):
        # Runs in a child: double every shared value in place.
        x.value *= 2
        y.value *= 2
        foo.x *= 2
        foo.y *= 2
        string.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2
    def test_sharedctypes(self, sync=False):
        from processing.sharedctypes import Value, Array, synchronized
        x = Value('i', 7)
        y = Value(ctypes.c_double, 1.0/3.0)
        foo = Value(_Foo, 3, 2)
        arr = Array('d', range(10))
        string = Array('c', 20)
        string.value = 'hello'
        args = [x, y, foo, arr, string]
        if sync:
            # wrap everything in synchronized (lock protected) wrappers
            args = [synchronized(obj) for obj in args]
        p = self.Process(target=self._double, args=args)
        p.start()
        p.join()
        # the child's doubling must be visible in this process
        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, 'hellohello')
    def test_synchronize(self):
        self.test_sharedctypes(sync=True)
    def test_copy(self):
        # copy() makes an independent (non-shared) duplicate
        from processing.sharedctypes import Value, copy
        foo = _Foo(2, 5.0)
        bar = copy(foo)
        foo.x = 0
        foo.y = 0
        self.assertEqual(bar.x, 2)
        self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
    """Tests for Finalize callbacks and their ordering at exit."""
    ALLOWED_TYPES = ('processes',)
    def _test_finalize(self, conn):
        # Runs in a child; each finalizer reports its tag through
        # `conn` so the parent can check which ran and in what order.
        from processing.finalize import Finalize
        from processing.process import _exit_func
        class Foo(object):
            pass
        a = Foo()
        Finalize(a, conn.send, args=('a',))
        del a           # triggers callback for a
        b = Foo()
        close_b = Finalize(b, conn.send, args=('b',))
        close_b()       # triggers callback for b
        close_b()       # does nothing because callback has already been called
        del b           # does nothing because callback has already been called
        c = Foo()
        Finalize(c, conn.send, args=('c',))
        d10 = Foo()
        Finalize(d10, conn.send, args=('d10',), exitpriority=1)
        d01 = Foo()
        Finalize(d01, conn.send, args=('d01',), exitpriority=0)
        d02 = Foo()
        Finalize(d02, conn.send, args=('d02',), exitpriority=0)
        d03 = Foo()
        Finalize(d03, conn.send, args=('d03',), exitpriority=0)
        Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
        # call processing's cleanup function then exit process without
        # garbage collecting locals
        _exit_func()
        conn.close()
        os._exit(0)
    def test_finalize(self):
        conn, child_conn = self.Pipe()
        p = self.Process(target=self._test_finalize, args=(child_conn,))
        p.start()
        p.join()
        result = [obj for obj in iter(conn.recv, 'STOP')]
        # 'a' and 'b' fired before exit; at exit, higher exitpriority
        # runs first and equal priorities run most-recently-registered
        # first; 'c' (no exitpriority) is not run by _exit_func()
        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01'])
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
    """Return a dict mapping each name in `names` to the matching
    attribute of `Source`.

    Plain functions are wrapped in staticmethod() so that they can be
    injected into a mixin class body without being turned into
    methods by the class machinery."""
    func_type = type(get_attributes)
    result = {}
    for attr_name in names:
        value = getattr(Source, attr_name)
        if type(value) == func_type:
            value = staticmethod(value)
        result[attr_name] = value
    return result
def create_test_cases(Mixin, type):
    """Build concrete TestCase classes from the _Test* templates.

    For every module-level class whose name starts with '_Test' and
    whose ALLOWED_TYPES contains `type`, create a subclass combining
    the template, unittest.TestCase and `Mixin`.  Returns a dict
    mapping the generated names ('With<Type>Test...') to the classes.
    """
    capitalized = type[0].upper() + type[1:]
    generated = {}
    module_names = globals()
    for name in module_names.keys():
        if not name.startswith('_Test'):
            continue
        template = module_names[name]
        if type not in template.ALLOWED_TYPES:
            continue
        newname = 'With' + capitalized + name[1:]
        class Temp(template, unittest.TestCase, Mixin):
            pass
        Temp.__name__ = newname
        Temp.__module__ = Mixin.__module__
        generated[newname] = Temp
    return generated
#
# Create test cases
#
class ProcessesMixin(object):
    """Mixin supplying real process-based versions of the shared types."""
    TYPE = 'processes'
    Process = processing.Process
    # copy the factory functions from the processing module into this
    # class body (wrapped as staticmethods by get_attributes)
    locals().update(get_attributes(processing, (
        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
        'Condition', 'Event', 'Value', 'Array',
        'currentProcess', 'activeChildren', 'Pipe', 'connection'
        )))
# generate the concrete process-based test cases at module level so
# unittest discovery can see them
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
    """Mixin supplying manager (proxy) based versions of the shared types."""
    TYPE = 'manager'
    Process = processing.Process
    # allocated without running __init__(); test_main() initializes
    # and starts it before the tests run
    manager = object.__new__(SyncManager)
    # copy the manager's factory methods into this class body
    locals().update(get_attributes(manager, (
        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
        'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
        'Namespace', '_debug_info'
        )))
# generate the concrete manager-based test cases at module level
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
    """Mixin supplying thread-based (processing.dummy) versions of the
    shared types."""
    TYPE = 'threads'
    Process = processing.dummy.Process
    # copy the factory functions from processing.dummy into this class
    locals().update(get_attributes(processing.dummy, (
        'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
        'Condition', 'Event', 'Value', 'Array', 'currentProcess',
        'activeChildren', 'Pipe', 'connection', 'dict', 'list',
        'Namespace'
        )))
# generate the concrete thread-based test cases at module level
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
#
#
#
def test_main(run=None):
    """Run every generated test case.

    `run` is a callable taking a TestSuite; it defaults to
    test_support.run_suite so regrtest can drive this module."""
    if run is None:
        from test.test_support import run_suite as run
    # set up the shared fixtures used by the process and manager tests
    ProcessesMixin.pool = processing.Pool(4)
    ManagerMixin.manager.__init__()
    ManagerMixin.manager.start()
    testcases = (
        testcases_processes.values() +
        testcases_threads.values() +
        testcases_manager.values()
        )
    loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
    suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
    run(suite)
    # tear down the shared fixtures
    ManagerMixin.manager.shutdown()
    ProcessesMixin.pool.terminate()
    del ProcessesMixin.pool
def main():
    # verbose text runner for running this file as a script
    test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
    main()
+66
-0

@@ -0,1 +1,3 @@

.. default-role:: literal
==========================

@@ -5,2 +7,66 @@ Changelog for processing

Changes in 0.50
---------------
* `ctypes` is now a prerequisite if you want to use shared memory --
with Python 2.4 you will need to install it separately.
* `LocalManager()` has been removed.
* Added `processing.Value()` and `processing.Array()`
which are similar to `LocalManager.SharedValue()` and
`LocalManager.SharedArray()`.
* In the `sharedctypes` module `new_value()` and `new_array()` have
been renamed `Value()` and `Array()`.
* `Process.stop()`, `Process.getStoppable()` and
`Process.setStoppable()` have been removed. Use
`Process.terminate()` instead.
* `processing.Lock` now matches `threading.Lock` behaviour more
closely: now a thread can release a lock it does not own, and now
when a thread tries acquiring a lock it already owns a deadlock
results instead of an exception.
* On Windows when the main thread is blocking on a method of `Lock`,
`RLock`, `Semaphore`, `BoundedSemaphore`, `Condition` it will no
longer ignore Ctrl-C. (The same was already true on Unix.)
This differs from the behaviour of the equivalent objects in
`threading` which will completely ignore Ctrl-C.
* The `test` sub-package has been replaced by lots of unit tests in a
`tests` sub-package. Some of the old test files have been moved
over to a new `examples` sub-package.
* On Windows it is now possible for a non-console python program
(i.e. one using `pythonw.exe` instead of `python.exe`) to use
`processing`.
Previously an exception was raised when `subprocess.py` tried to
duplicate stdin, stdout, stderr.
* Proxy objects should now be thread safe -- they now use thread local
storage.
* Trying to transfer shared resources such as locks, queues etc
between processes over a pipe or queue will now raise `RuntimeError`
with a message saying that the object should only be shared between
processes using inheritance.
Previously, this worked unreliably on Windows but would fail with an
unexplained `AssertionError` on Unix.
* The names of some of the macros used for compiling the extension
have changed. See `INSTALL.txt` and `setup.py`.
* A few changes which (hopefully) make compilation possible on Solaris.
* Lots of refactoring of the code.
* Fixed reference leaks so that unit tests pass with "regrtest -R::"
(at least on Linux).
Changes in 0.40

@@ -7,0 +73,0 @@ ---------------

+198
-136

@@ -14,5 +14,67 @@ <?xml version="1.0" encoding="utf-8" ?>

<div class="section">
<h1><a id="changes-in-0-50" name="changes-in-0-50">Changes in 0.50</a></h1>
<ul>
<li><p class="first"><tt class="docutils literal"><span class="pre">ctypes</span></tt> is now a prerequisite if you want to use shared memory --
with Python 2.4 you will need to install it separately.</p>
</li>
<li><p class="first"><tt class="docutils literal"><span class="pre">LocalManager()</span></tt> has been removed.</p>
</li>
<li><p class="first">Added <tt class="docutils literal"><span class="pre">processing.Value()</span></tt> and <tt class="docutils literal"><span class="pre">processing.Array()</span></tt>
which are similar to <tt class="docutils literal"><span class="pre">LocalManager.SharedValue()</span></tt> and
<tt class="docutils literal"><span class="pre">LocalManager.SharedArray()</span></tt>.</p>
</li>
<li><p class="first">In the <tt class="docutils literal"><span class="pre">sharedctypes</span></tt> module <tt class="docutils literal"><span class="pre">new_value()</span></tt> and <tt class="docutils literal"><span class="pre">new_array()</span></tt> have
been renamed <tt class="docutils literal"><span class="pre">Value()</span></tt> and <tt class="docutils literal"><span class="pre">Array()</span></tt>.</p>
</li>
<li><p class="first"><tt class="docutils literal"><span class="pre">Process.stop()</span></tt>, <tt class="docutils literal"><span class="pre">Process.getStoppable()</span></tt> and
<tt class="docutils literal"><span class="pre">Process.setStoppable()</span></tt> have been removed. Use
<tt class="docutils literal"><span class="pre">Process.terminate()</span></tt> instead.</p>
</li>
<li><p class="first"><tt class="docutils literal"><span class="pre">processing.Lock</span></tt> now matches <tt class="docutils literal"><span class="pre">threading.Lock</span></tt> behaviour more
closely: now a thread can release a lock it does not own, and now
when a thread tries acquiring a lock it already owns a deadlock
results instead of an exception.</p>
</li>
<li><p class="first">On Windows when the main thread is blocking on a method of <tt class="docutils literal"><span class="pre">Lock</span></tt>,
<tt class="docutils literal"><span class="pre">RLock</span></tt>, <tt class="docutils literal"><span class="pre">Semaphore</span></tt>, <tt class="docutils literal"><span class="pre">BoundedSemaphore</span></tt>, <tt class="docutils literal"><span class="pre">Condition</span></tt> it will no
longer ignore Ctrl-C. (The same was already true on Unix.)</p>
<p>This differs from the behaviour of the equivalent objects in
<tt class="docutils literal"><span class="pre">threading</span></tt> which will completely ignore Ctrl-C.</p>
</li>
<li><p class="first">The <tt class="docutils literal"><span class="pre">test</span></tt> sub-package has been replaced by lots of unit tests in a
<tt class="docutils literal"><span class="pre">tests</span></tt> sub-package. Some of the old test files have been moved
over to a new <tt class="docutils literal"><span class="pre">examples</span></tt> sub-package.</p>
</li>
<li><p class="first">On Windows it is now possible for a non-console python program
(i.e. one using <tt class="docutils literal"><span class="pre">pythonw.exe</span></tt> instead of <tt class="docutils literal"><span class="pre">python.exe</span></tt>) to use
<tt class="docutils literal"><span class="pre">processing</span></tt>.</p>
<p>Previously an exception was raised when <tt class="docutils literal"><span class="pre">subprocess.py</span></tt> tried to
duplicate stdin, stdout, stderr.</p>
</li>
<li><p class="first">Proxy objects should now be thread safe -- they now use thread local
storage.</p>
</li>
<li><p class="first">Trying to transfer shared resources such as locks, queues etc
between processes over a pipe or queue will now raise <tt class="docutils literal"><span class="pre">RuntimeError</span></tt>
with a message saying that the object should only be shared between
processes using inheritance.</p>
<p>Previously, this worked unreliably on Windows but would fail with an
unexplained <tt class="docutils literal"><span class="pre">AssertionError</span></tt> on Unix.</p>
</li>
<li><p class="first">The names of some of the macros used for compiling the extension
have changed. See <tt class="docutils literal"><span class="pre">INSTALL.txt</span></tt> and <tt class="docutils literal"><span class="pre">setup.py</span></tt>.</p>
</li>
<li><p class="first">A few changes which (hopefully) make compilation possible on Solaris.</p>
</li>
<li><p class="first">Lots of refactoring of the code.</p>
</li>
<li><p class="first">Fixed reference leaks so that unit tests pass with &quot;regrtest -R::&quot;
(at least on Linux).</p>
</li>
</ul>
</div>
<div class="section">
<h1><a id="changes-in-0-40" name="changes-in-0-40">Changes in 0.40</a></h1>
<ul>
<li><p class="first">Removed <cite>SimpleQueue</cite> and <cite>PosixQueue</cite> types. Just use <cite>Queue</cite> instead.</p>
<li><p class="first">Removed <tt class="docutils literal"><span class="pre">SimpleQueue</span></tt> and <tt class="docutils literal"><span class="pre">PosixQueue</span></tt> types. Just use <tt class="docutils literal"><span class="pre">Queue</span></tt> instead.</p>
</li>

@@ -26,3 +88,3 @@ <li><p class="first">Previously if you forgot to use the</p>

<p>idiom on Windows then processes could be created recursively
bringing the computer to its knees. Now <cite>RuntimeError</cite> will be
bringing the computer to its knees. Now <tt class="docutils literal"><span class="pre">RuntimeError</span></tt> will be
raised instead.</p>

@@ -42,13 +104,13 @@ </li>

<li><p class="first">One can now create one-way pipes by doing
<cite>reader, writer = Pipe(duplex=False)</cite>.</p>
<tt class="docutils literal"><span class="pre">reader,</span> <span class="pre">writer</span> <span class="pre">=</span> <span class="pre">Pipe(duplex=False)</span></tt>.</p>
</li>
<li><p class="first">Rewrote code for managing shared memory maps.</p>
</li>
<li><p class="first">Added a <cite>sharedctypes</cite> module for creating <cite>ctypes</cite> objects allocated
<li><p class="first">Added a <tt class="docutils literal"><span class="pre">sharedctypes</span></tt> module for creating <tt class="docutils literal"><span class="pre">ctypes</span></tt> objects allocated
from shared memory. On Python 2.4 this requires the installation of
<cite>ctypes</cite>.</p>
<p><cite>ctypes</cite> objects are not protected by any locks so you will need to
<tt class="docutils literal"><span class="pre">ctypes</span></tt>.</p>
<p><tt class="docutils literal"><span class="pre">ctypes</span></tt> objects are not protected by any locks so you will need to
synchronize access to them (such as by using a lock). However they
can be much faster to access than equivalent objects allocated using
a <cite>LocalManager</cite>.</p>
a <tt class="docutils literal"><span class="pre">LocalManager</span></tt>.</p>
</li>

@@ -58,8 +120,8 @@ <li><p class="first">Rearranged documentation.</p>

<li><p class="first">Previously the C extension caused a segfault on 64 bit machines with
Python 2.5 because it used <cite>int</cite> instead of <cite>Py_ssize_t</cite> in certain
Python 2.5 because it used <tt class="docutils literal"><span class="pre">int</span></tt> instead of <tt class="docutils literal"><span class="pre">Py_ssize_t</span></tt> in certain
places. This is now fixed. Thanks to Alexy Khrabrov for the report.</p>
</li>
<li><p class="first">A fix for <cite>Pool.terminate()</cite>.</p>
<li><p class="first">A fix for <tt class="docutils literal"><span class="pre">Pool.terminate()</span></tt>.</p>
</li>
<li><p class="first">A fix for cleanup behaviour of <cite>Queue</cite>.</p>
<li><p class="first">A fix for cleanup behaviour of <tt class="docutils literal"><span class="pre">Queue</span></tt>.</p>
</li>

@@ -72,35 +134,35 @@ </ul>

<li><p class="first">Have revamped the queue types. Now the queue types are
<cite>Queue</cite>, <cite>SimpleQueue</cite> and (on systems which support it)
<cite>PosixQueue</cite>.</p>
<p>Now <cite>Queue</cite> should behave just like Python's normal <cite>Queue.Queue</cite>
class except that <cite>qsize()</cite>, <cite>task_done()</cite> and <cite>join()</cite> are not
<tt class="docutils literal"><span class="pre">Queue</span></tt>, <tt class="docutils literal"><span class="pre">SimpleQueue</span></tt> and (on systems which support it)
<tt class="docutils literal"><span class="pre">PosixQueue</span></tt>.</p>
<p>Now <tt class="docutils literal"><span class="pre">Queue</span></tt> should behave just like Python's normal <tt class="docutils literal"><span class="pre">Queue.Queue</span></tt>
class except that <tt class="docutils literal"><span class="pre">qsize()</span></tt>, <tt class="docutils literal"><span class="pre">task_done()</span></tt> and <tt class="docutils literal"><span class="pre">join()</span></tt> are not
implemented. In particular, if no maximum size was specified when
the queue was created then <cite>put()</cite> will always succeed without
the queue was created then <tt class="docutils literal"><span class="pre">put()</span></tt> will always succeed without
blocking.</p>
<p>A <cite>SimpleQueue</cite> instance is really just a pipe protected by a couple
of locks. It has <cite>get()</cite>, <cite>put()</cite> and <cite>empty()</cite> methods but does
<p>A <tt class="docutils literal"><span class="pre">SimpleQueue</span></tt> instance is really just a pipe protected by a couple
of locks. It has <tt class="docutils literal"><span class="pre">get()</span></tt>, <tt class="docutils literal"><span class="pre">put()</span></tt> and <tt class="docutils literal"><span class="pre">empty()</span></tt> methods but does
not support timeouts or non-blocking.</p>
<p><cite>BufferedPipeQueue()</cite> and <cite>PipeQueue()</cite> remain as deprecated
aliases of <cite>Queue()</cite> but <cite>BufferedPosixQueue()</cite> has been removed.
(Not sure if we really need to keep <cite>PosixQueue()</cite>...)</p>
<p><tt class="docutils literal"><span class="pre">BufferedPipeQueue()</span></tt> and <tt class="docutils literal"><span class="pre">PipeQueue()</span></tt> remain as deprecated
aliases of <tt class="docutils literal"><span class="pre">Queue()</span></tt> but <tt class="docutils literal"><span class="pre">BufferedPosixQueue()</span></tt> has been removed.
(Not sure if we really need to keep <tt class="docutils literal"><span class="pre">PosixQueue()</span></tt>...)</p>
</li>
<li><p class="first">Previously the <cite>Pool.shutdown()</cite> method was a little dodgy -- it
could block indefinitely if <cite>map()</cite> or <cite>imap*()</cite> were used and did
<li><p class="first">Previously the <tt class="docutils literal"><span class="pre">Pool.shutdown()</span></tt> method was a little dodgy -- it
could block indefinitely if <tt class="docutils literal"><span class="pre">map()</span></tt> or <tt class="docutils literal"><span class="pre">imap*()</span></tt> were used and did
not try to terminate workers while they were doing a task.</p>
<p>Now there are three new methods <cite>close()</cite>, <cite>terminate()</cite> and
<cite>join()</cite> -- <cite>shutdown()</cite> is retained as a deprecated alias of
<cite>terminate()</cite>. Thanks to Gerald John M. Manipon for feature
request/suggested patch to <cite>shutdown()</cite>.</p>
<p>Now there are three new methods <tt class="docutils literal"><span class="pre">close()</span></tt>, <tt class="docutils literal"><span class="pre">terminate()</span></tt> and
<tt class="docutils literal"><span class="pre">join()</span></tt> -- <tt class="docutils literal"><span class="pre">shutdown()</span></tt> is retained as a deprecated alias of
<tt class="docutils literal"><span class="pre">terminate()</span></tt>. Thanks to Gerald John M. Manipon for feature
request/suggested patch to <tt class="docutils literal"><span class="pre">shutdown()</span></tt>.</p>
</li>
<li><p class="first"><cite>Pool.imap()</cite> and <cite>Pool.imap_unordered()</cite> has gained a <cite>chunksize</cite>
<li><p class="first"><tt class="docutils literal"><span class="pre">Pool.imap()</span></tt> and <tt class="docutils literal"><span class="pre">Pool.imap_unordered()</span></tt> has gained a <tt class="docutils literal"><span class="pre">chunksize</span></tt>
argument which allows the iterable to be submitted to the pool in
chunks. Choosing <cite>chunksize</cite> appropriately makes <cite>Pool.imap()</cite>
almost as fast as <cite>Pool.map()</cite> even for long iterables and cheap
chunks. Choosing <tt class="docutils literal"><span class="pre">chunksize</span></tt> appropriately makes <tt class="docutils literal"><span class="pre">Pool.imap()</span></tt>
almost as fast as <tt class="docutils literal"><span class="pre">Pool.map()</span></tt> even for long iterables and cheap
functions.</p>
</li>
<li><p class="first">Previously on Windows when the cleanup code for a <cite>LocalManager</cite>
<li><p class="first">Previously on Windows when the cleanup code for a <tt class="docutils literal"><span class="pre">LocalManager</span></tt>
attempts to unlink the name of the file which backs the shared
memory map an exception is raised if a child process still exists
which has a handle open for that mmap. This is likely to happen if
a daemon process inherits a <cite>LocalManager</cite> instance.</p>
a daemon process inherits a <tt class="docutils literal"><span class="pre">LocalManager</span></tt> instance.</p>
<p>Now the parent process will remember the filename and attempt to

@@ -110,3 +172,3 @@ unlink the file name again once all the child processes have been

</li>
<li><p class="first"><cite>types.MethodType</cite> is registered with <cite>copy_reg</cite> so now instance
<li><p class="first"><tt class="docutils literal"><span class="pre">types.MethodType</span></tt> is registered with <tt class="docutils literal"><span class="pre">copy_reg</span></tt> so now instance
methods and class methods should be picklable. (Unfortunately there is

@@ -118,5 +180,5 @@ no obvious way of supporting the pickling of staticmethods since

</li>
<li><p class="first">On Windows <cite>reduction.fromfd()</cite> now returns true instances of
<cite>_socket.socket</cite>, so there is no more need for the
<cite>_processing.falsesocket</cite> type.</p>
<li><p class="first">On Windows <tt class="docutils literal"><span class="pre">reduction.fromfd()</span></tt> now returns true instances of
<tt class="docutils literal"><span class="pre">_socket.socket</span></tt>, so there is no more need for the
<tt class="docutils literal"><span class="pre">_processing.falsesocket</span></tt> type.</p>
</li>

@@ -129,12 +191,12 @@ </ul>

<li>Updated metadata and documentation because the project is now hosted
at <cite>developer.berlios.de/projects/pyprocessing</cite>.</li>
<li>The <cite>Pool.join()</cite> method has been removed. <cite>Pool.shutdown()</cite> will
at <tt class="docutils literal"><span class="pre">developer.berlios.de/projects/pyprocessing</span></tt>.</li>
<li>The <tt class="docutils literal"><span class="pre">Pool.join()</span></tt> method has been removed. <tt class="docutils literal"><span class="pre">Pool.shutdown()</span></tt> will
now join the worker processes automatically.</li>
<li>A pool object no longer participates in a reference cycle so
<cite>Pool.shutdown()</cite> should get called as soon as its reference count
<tt class="docutils literal"><span class="pre">Pool.shutdown()</span></tt> should get called as soon as its reference count
falls to zero.</li>
<li>On Windows if <cite>enableLogging()</cite> was used at module scope then the
<li>On Windows if <tt class="docutils literal"><span class="pre">enableLogging()</span></tt> was used at module scope then the
logger used by a child process would often get two copies of the
same handler. To fix this, now specifying a handler type in
<cite>enableLogging()</cite> will cause any previous handlers used by the
<tt class="docutils literal"><span class="pre">enableLogging()</span></tt> will cause any previous handlers used by the
logger to be discarded.</li>

@@ -147,3 +209,3 @@ </ul>

<li><p class="first">In recent versions on Unix the finalizers in a manager process were
never given a chance to run before <cite>os._exit()</cite> was called, so old
never given a chance to run before <tt class="docutils literal"><span class="pre">os._exit()</span></tt> was called, so old
unlinked AF_UNIX sockets could accumulate in '/tmp'. Fixed.</p>

@@ -162,17 +224,17 @@ </li>

</li>
<li><p class="first">Previously the arguments to <cite>processing.Semaphore()</cite> and
<cite>processing.BoundedSemaphore()</cite> did not have any defaults. The
defaults should be 1 to match <cite>threading</cite>. Fixed.</p>
<li><p class="first">Previously the arguments to <tt class="docutils literal"><span class="pre">processing.Semaphore()</span></tt> and
<tt class="docutils literal"><span class="pre">processing.BoundedSemaphore()</span></tt> did not have any defaults. The
defaults should be 1 to match <tt class="docutils literal"><span class="pre">threading</span></tt>. Fixed.</p>
</li>
<li><p class="first">It should now be possible for a Windows Service created by using
<cite>pywin32</cite> to spawn processes using the <cite>processing</cite> package.</p>
<p>Note that <cite>pywin32</cite> apparently has a bug meaning that <cite>Py_Finalize()</cite>
<tt class="docutils literal"><span class="pre">pywin32</span></tt> to spawn processes using the <tt class="docutils literal"><span class="pre">processing</span></tt> package.</p>
<p>Note that <tt class="docutils literal"><span class="pre">pywin32</span></tt> apparently has a bug meaning that <tt class="docutils literal"><span class="pre">Py_Finalize()</span></tt>
is never called when the service exits so functions registered with
<cite>atexit</cite> never get a chance to run. Therefore it is advisable to
explicitly call <cite>sys.exitfunc()</cite> or <cite>atexit._run_exitfuncs()</cite> at the
end of <cite>ServiceFramework.DoSvcRun()</cite>. Otherwise child processes are
<tt class="docutils literal"><span class="pre">atexit</span></tt> never get a chance to run. Therefore it is advisable to
explicitly call <tt class="docutils literal"><span class="pre">sys.exitfunc()</span></tt> or <tt class="docutils literal"><span class="pre">atexit._run_exitfuncs()</span></tt> at the
end of <tt class="docutils literal"><span class="pre">ServiceFramework.DoSvcRun()</span></tt>. Otherwise child processes are
liable to survive the service when it is stopped. Thanks to Charlie
Hull for the report.</p>
</li>
<li><p class="first">Added <cite>getLogger()</cite> and <cite>enableLogging()</cite> to support logging.</p>
<li><p class="first">Added <tt class="docutils literal"><span class="pre">getLogger()</span></tt> and <tt class="docutils literal"><span class="pre">enableLogging()</span></tt> to support logging.</p>
</li>

@@ -184,21 +246,21 @@ </ul>

<ul>
<li><p class="first">By default processes are no longer stoppable using the <cite>stop()</cite>
method: one must call <cite>setStoppable(True)</cite> before <cite>start()</cite> in order
to use the <cite>stop()</cite> method. (Note that <cite>terminate()</cite> will work
<li><p class="first">By default processes are no longer stoppable using the <tt class="docutils literal"><span class="pre">stop()</span></tt>
method: one must call <tt class="docutils literal"><span class="pre">setStoppable(True)</span></tt> before <tt class="docutils literal"><span class="pre">start()</span></tt> in order
to use the <tt class="docutils literal"><span class="pre">stop()</span></tt> method. (Note that <tt class="docutils literal"><span class="pre">terminate()</span></tt> will work
regardless of whether the process is marked as being &quot;stoppable&quot;.)</p>
<p>The reason for this is that on Windows getting <cite>stop()</cite> to work
<p>The reason for this is that on Windows getting <tt class="docutils literal"><span class="pre">stop()</span></tt> to work
involves starting a new console for the child process and installing
a signal handler for the <cite>SIGBREAK</cite> signal. This unfortunately
a signal handler for the <tt class="docutils literal"><span class="pre">SIGBREAK</span></tt> signal. This unfortunately
means that Ctrl-Break cannot be used to kill all processes of
the program.</p>
</li>
<li><p class="first">Added <cite>setStoppable()</cite> and <cite>getStoppable()</cite> methods -- see above.</p>
<li><p class="first">Added <tt class="docutils literal"><span class="pre">setStoppable()</span></tt> and <tt class="docutils literal"><span class="pre">getStoppable()</span></tt> methods -- see above.</p>
</li>
<li><p class="first">Added <cite>BufferedQueue</cite>/<cite>BufferedPipeQueue</cite>/<cite>BufferedPosixQueue</cite>.
<li><p class="first">Added <tt class="docutils literal"><span class="pre">BufferedQueue</span></tt>/<tt class="docutils literal"><span class="pre">BufferedPipeQueue</span></tt>/<tt class="docutils literal"><span class="pre">BufferedPosixQueue</span></tt>.
Putting an object on a buffered queue will always succeed without
blocking (just like with <cite>Queue.Queue</cite> if no maximum size is
blocking (just like with <tt class="docutils literal"><span class="pre">Queue.Queue</span></tt> if no maximum size is
specified). This makes them potentially safer than the normal queue
types provided by <cite>processing</cite> which have finite capacity and may
types provided by <tt class="docutils literal"><span class="pre">processing</span></tt> which have finite capacity and may
cause deadlocks if they fill.</p>
<p><cite>test/test_worker.py</cite> has been updated to use <cite>BufferedQueue</cite> for
<p><tt class="docutils literal"><span class="pre">test/test_worker.py</span></tt> has been updated to use <tt class="docutils literal"><span class="pre">BufferedQueue</span></tt> for
the task queue instead of explicitly spawning a thread to feed tasks

@@ -208,9 +270,9 @@ to the queue without risking a deadlock.</p>

<li><p class="first">Now when the NO_SEM_TIMED macro is set polling will be used to get
around the lack of <cite>sem_timedwait()</cite>. This means that
<cite>Condition.wait()</cite> and <cite>Queue.get()</cite> should now work with timeouts
around the lack of <tt class="docutils literal"><span class="pre">sem_timedwait()</span></tt>. This means that
<tt class="docutils literal"><span class="pre">Condition.wait()</span></tt> and <tt class="docutils literal"><span class="pre">Queue.get()</span></tt> should now work with timeouts
on Mac OS X.</p>
</li>
<li><p class="first">Added a <cite>callback</cite> argument to <cite>Pool.apply_async()</cite>.</p>
<li><p class="first">Added a <tt class="docutils literal"><span class="pre">callback</span></tt> argument to <tt class="docutils literal"><span class="pre">Pool.apply_async()</span></tt>.</p>
</li>
<li><p class="first">Added <cite>test/test_httpserverpool.py</cite> which runs a pool of http
<li><p class="first">Added <tt class="docutils literal"><span class="pre">test/test_httpserverpool.py</span></tt> which runs a pool of http
servers which share a single listening socket.</p>

@@ -224,5 +286,5 @@ </li>

</li>
<li><p class="first">Fixed bug in the iterator returned by <cite>Pool.imap()</cite>.</p>
<li><p class="first">Fixed bug in the iterator returned by <tt class="docutils literal"><span class="pre">Pool.imap()</span></tt>.</p>
</li>
<li><p class="first">Fixed bug in <cite>Condition.__repr__()</cite>.</p>
<li><p class="first">Fixed bug in <tt class="docutils literal"><span class="pre">Condition.__repr__()</span></tt>.</p>
</li>

@@ -239,6 +301,6 @@ <li><p class="first">Fixed a handle/file descriptor leak when sockets or connections are

trying to import it failed with &quot;undefined symbol: _sem_timedwait&quot;.
Unfortunately the <cite>ImportError</cite> exception was silently swallowed.</p>
<p>This is now fixed by using the <cite>NO_SEM_TIMED</cite> macro. Unfortunately
this means that some methods like <cite>Condition.wait()</cite> and
<cite>Queue.get()</cite> will not work with timeouts on Mac OS X. If you
Unfortunately the <tt class="docutils literal"><span class="pre">ImportError</span></tt> exception was silently swallowed.</p>
<p>This is now fixed by using the <tt class="docutils literal"><span class="pre">NO_SEM_TIMED</span></tt> macro. Unfortunately
this means that some methods like <tt class="docutils literal"><span class="pre">Condition.wait()</span></tt> and
<tt class="docutils literal"><span class="pre">Queue.get()</span></tt> will not work with timeouts on Mac OS X. If you
really need to be able to use timeouts then you can always use the

@@ -248,6 +310,6 @@ equivalent objects created with a manager. Thanks to Doug Hellmann

</li>
<li><p class="first">Added a <cite>terminate()</cite> method to process objects which is more
forceful than <cite>stop()</cite>.</p>
<li><p class="first">Added a <tt class="docutils literal"><span class="pre">terminate()</span></tt> method to process objects which is more
forceful than <tt class="docutils literal"><span class="pre">stop()</span></tt>.</p>
</li>
<li><p class="first">Fixed bug in the cleanup function registered with <cite>atexit</cite> which on
<li><p class="first">Fixed bug in the cleanup function registered with <tt class="docutils literal"><span class="pre">atexit</span></tt> which on
Windows could cause a process which is shutting down to deadlock

@@ -257,10 +319,10 @@ waiting for a manager to exit. Thanks to Dominique Wahli for report

</li>
<li><p class="first">Added <cite>test/test_workers.py</cite> which gives an example of how to create
<li><p class="first">Added <tt class="docutils literal"><span class="pre">test/test_workers.py</span></tt> which gives an example of how to create
a collection of worker processes which execute tasks from one queue
and return results on another.</p>
</li>
<li><p class="first">Added <cite>processing.Pool()</cite> which returns a process pool object. This
<li><p class="first">Added <tt class="docutils literal"><span class="pre">processing.Pool()</span></tt> which returns a process pool object. This
allows one to execute functions asynchronously. It also has a
parallel implementation of the <cite>map()</cite> builtin. This is still
<em>experimental</em> and undocumented --- see <cite>test/test_pool.py</cite> for
parallel implementation of the <tt class="docutils literal"><span class="pre">map()</span></tt> builtin. This is still
<em>experimental</em> and undocumented --- see <tt class="docutils literal"><span class="pre">test/test_pool.py</span></tt> for
example usage.</p>

@@ -273,6 +335,6 @@ </li>

<ul>
<li><p class="first">Added a <cite>recvbytes_into()</cite> method for receiving byte data into
<li><p class="first">Added a <tt class="docutils literal"><span class="pre">recvbytes_into()</span></tt> method for receiving byte data into
objects with the writable buffer interface. Also renamed the
<cite>_recv_string()</cite> and <cite>_send_string()</cite> methods of connection objects
to <cite>recvbytes()</cite> and <cite>sendbytes()</cite>.</p>
<tt class="docutils literal"><span class="pre">_recv_string()</span></tt> and <tt class="docutils literal"><span class="pre">_send_string()</span></tt> methods of connection objects
to <tt class="docutils literal"><span class="pre">recvbytes()</span></tt> and <tt class="docutils literal"><span class="pre">sendbytes()</span></tt>.</p>
</li>

@@ -282,19 +344,19 @@ <li><p class="first">Some optimizations for the transferring of large blocks of data

</li>
<li><p class="first">On Unix <cite>os.sysconf()</cite> is now used by default to determine whether
<li><p class="first">On Unix <tt class="docutils literal"><span class="pre">os.sysconf()</span></tt> is now used by default to determine whether
to compile in support for posix semaphores or posix message queues.</p>
<p>By using the <cite>NO_SEM_TIMED</cite> and <cite>NO_MQ_TIMED</cite> macros (see
<cite>INSTALL.txt</cite>) it should now also be possible to compile in
<p>By using the <tt class="docutils literal"><span class="pre">NO_SEM_TIMED</span></tt> and <tt class="docutils literal"><span class="pre">NO_MQ_TIMED</span></tt> macros (see
<tt class="docutils literal"><span class="pre">INSTALL.txt</span></tt>) it should now also be possible to compile in
(partial) semaphore or queue support on Unix systems which lack the
timeout functions <cite>sem_timedwait()</cite> or <cite>mq_timedreceive()</cite> and
<cite>mq_timesend()</cite>.</p>
timeout functions <tt class="docutils literal"><span class="pre">sem_timedwait()</span></tt> or <tt class="docutils literal"><span class="pre">mq_timedreceive()</span></tt> and
<tt class="docutils literal"><span class="pre">mq_timesend()</span></tt>.</p>
</li>
<li><p class="first"><cite>gettimeofday()</cite> is now used instead of <cite>clock_gettime()</cite> making
<li><p class="first"><tt class="docutils literal"><span class="pre">gettimeofday()</span></tt> is now used instead of <tt class="docutils literal"><span class="pre">clock_gettime()</span></tt> making
compilation of the C extension (hopefully) possible on Mac OSX. No
modification of <cite>setup.py</cite> should be necessary. Thanks to Michele
modification of <tt class="docutils literal"><span class="pre">setup.py</span></tt> should be necessary. Thanks to Michele
Bertoldi for report and proposed patch.</p>
</li>
<li><p class="first"><cite>cpuCount()</cite> function added which returns the number of CPUs
<li><p class="first"><tt class="docutils literal"><span class="pre">cpuCount()</span></tt> function added which returns the number of CPUs
in the system.</p>
</li>
<li><p class="first">Bugfixes to <cite>PosixQueue</cite> class.</p>
<li><p class="first">Bugfixes to <tt class="docutils literal"><span class="pre">PosixQueue</span></tt> class.</p>
</li>

@@ -306,16 +368,16 @@ </ul>

<ul class="simple">
<li>Refactored and simplified <cite>_nonforking</cite> module -- info about
<cite>sys.modules</cite> of parent process is no longer passed on to child
process. Also <cite>pkgutil</cite> is no longer used.</li>
<li>Allocated space from an mmap used by <cite>LocalManager</cite> will now be
<li>Refactored and simplified <tt class="docutils literal"><span class="pre">_nonforking</span></tt> module -- info about
<tt class="docutils literal"><span class="pre">sys.modules</span></tt> of parent process is no longer passed on to child
process. Also <tt class="docutils literal"><span class="pre">pkgutil</span></tt> is no longer used.</li>
<li>Allocated space from an mmap used by <tt class="docutils literal"><span class="pre">LocalManager</span></tt> will now be
recycled.</li>
<li>Better tests for <cite>LocalManager</cite>.</li>
<li>Fixed bug in <cite>managers.py</cite> concerning refcounting of shared objects.
<li>Better tests for <tt class="docutils literal"><span class="pre">LocalManager</span></tt>.</li>
<li>Fixed bug in <tt class="docutils literal"><span class="pre">managers.py</span></tt> concerning refcounting of shared objects.
Bug affects the case where the callable used to create a shared
object does not return a unique object each time it is called.
Thanks to Alexey Akimov for the report.</li>
<li>Added a <cite>freezeSupport()</cite> function. Calling this at the appropriate
<li>Added a <tt class="docutils literal"><span class="pre">freezeSupport()</span></tt> function. Calling this at the appropriate
point in the main module is necessary when freezing a multiprocess
program to produce a Windows executable. (Has been tested with
<cite>py2exe</cite>, <cite>PyInstaller</cite> and <cite>cx_Freeze</cite>.)</li>
<tt class="docutils literal"><span class="pre">py2exe</span></tt>, <tt class="docutils literal"><span class="pre">PyInstaller</span></tt> and <tt class="docutils literal"><span class="pre">cx_Freeze</span></tt>.)</li>
</ul>

@@ -326,5 +388,5 @@ </div>

<ul class="simple">
<li>Fixed one line bug in <cite>localmanager.py</cite> which caused shared memory maps
<li>Fixed one line bug in <tt class="docutils literal"><span class="pre">localmanager.py</span></tt> which caused shared memory maps
not to be resized properly.</li>
<li>Added tests for shared values/structs/arrays to <cite>test/test_processing</cite>.</li>
<li>Added tests for shared values/structs/arrays to <tt class="docutils literal"><span class="pre">test/test_processing</span></tt>.</li>
</ul>

@@ -336,5 +398,5 @@ </div>

<li><p class="first">Process objects now support the complete API of thread objects.</p>
<p>In particular <cite>isAlive()</cite>, <cite>isDaemon()</cite>, <cite>setDaemon()</cite> have been
added and <cite>join()</cite> now supports the <cite>timeout</cite> parameter.</p>
<p>There are also new methods <cite>stop()</cite>, <cite>getPid()</cite> and <cite>getExitCode()</cite>.</p>
<p>In particular <tt class="docutils literal"><span class="pre">isAlive()</span></tt>, <tt class="docutils literal"><span class="pre">isDaemon()</span></tt>, <tt class="docutils literal"><span class="pre">setDaemon()</span></tt> have been
added and <tt class="docutils literal"><span class="pre">join()</span></tt> now supports the <tt class="docutils literal"><span class="pre">timeout</span></tt> parameter.</p>
<p>There are also new methods <tt class="docutils literal"><span class="pre">stop()</span></tt>, <tt class="docutils literal"><span class="pre">getPid()</span></tt> and <tt class="docutils literal"><span class="pre">getExitCode()</span></tt>.</p>
</li>

@@ -345,8 +407,8 @@ <li><p class="first">Implemented synchronization primitives based on the Windows mutexes

<li><p class="first">Added support for sharing simple objects between processes by using
a shared memory map and the <cite>struct</cite> or <cite>array</cite> modules.</p>
a shared memory map and the <tt class="docutils literal"><span class="pre">struct</span></tt> or <tt class="docutils literal"><span class="pre">array</span></tt> modules.</p>
</li>
<li><p class="first">An <cite>activeChildren()</cite> function has been added to <cite>processing</cite> which
<li><p class="first">An <tt class="docutils literal"><span class="pre">activeChildren()</span></tt> function has been added to <tt class="docutils literal"><span class="pre">processing</span></tt> which
returns a list of the child processes which are still alive.</p>
</li>
<li><p class="first">A <cite>Pipe()</cite> function has been added which returns a pair of
<li><p class="first">A <tt class="docutils literal"><span class="pre">Pipe()</span></tt> function has been added which returns a pair of
connection objects representing the ends of a duplex connection over

@@ -356,23 +418,23 @@ which picklable objects can be sent.</p>

<li><p class="first">socket objects etc are now picklable and can be transferred between
processes. (Requires compilation of the <cite>_processing</cite> extension.)</p>
processes. (Requires compilation of the <tt class="docutils literal"><span class="pre">_processing</span></tt> extension.)</p>
</li>
<li><p class="first">Subclasses of <cite>managers.BaseManager</cite> no longer automatically spawn a
child process when an instance is created: the <cite>start()</cite> method must be
<li><p class="first">Subclasses of <tt class="docutils literal"><span class="pre">managers.BaseManager</span></tt> no longer automatically spawn a
child process when an instance is created: the <tt class="docutils literal"><span class="pre">start()</span></tt> method must be
called explicitly.</p>
</li>
<li><p class="first">On Windows child processes are now spawned using <cite>subprocess</cite>.</p>
<li><p class="first">On Windows child processes are now spawned using <tt class="docutils literal"><span class="pre">subprocess</span></tt>.</p>
</li>
<li><p class="first">On Windows the Python 2.5 version of <cite>pkgutil</cite> is now used for
loading modules by the <cite>_nonforking</cite> module. On Python 2.4 this
version of <cite>pkgutil</cite> (which uses the standard Python licence) is
included in <cite>processing.compat</cite>.</p>
<li><p class="first">On Windows the Python 2.5 version of <tt class="docutils literal"><span class="pre">pkgutil</span></tt> is now used for
loading modules by the <tt class="docutils literal"><span class="pre">_nonforking</span></tt> module. On Python 2.4 this
version of <tt class="docutils literal"><span class="pre">pkgutil</span></tt> (which uses the standard Python licence) is
included in <tt class="docutils literal"><span class="pre">processing.compat</span></tt>.</p>
</li>
<li><p class="first">The arguments to the functions in <cite>processing.connection</cite> have
<li><p class="first">The arguments to the functions in <tt class="docutils literal"><span class="pre">processing.connection</span></tt> have
changed slightly.</p>
</li>
<li><p class="first">Connection objects now have a <cite>poll()</cite> method which tests whether
<li><p class="first">Connection objects now have a <tt class="docutils literal"><span class="pre">poll()</span></tt> method which tests whether
there is any data available for reading.</p>
</li>
<li><p class="first">The <cite>test/py2exedemo</cite> folder shows how to get <cite>py2exe</cite> to create a
Windows executable from a program using the <cite>processing</cite> package.</p>
<li><p class="first">The <tt class="docutils literal"><span class="pre">test/py2exedemo</span></tt> folder shows how to get <tt class="docutils literal"><span class="pre">py2exe</span></tt> to create a
Windows executable from a program using the <tt class="docutils literal"><span class="pre">processing</span></tt> package.</p>
</li>

@@ -392,3 +454,3 @@ <li><p class="first">More tests.</p>

referent which have been explicitly exposed.</li>
<li>The <cite>connection</cite> sub-package now supports digest authentication.</li>
<li>The <tt class="docutils literal"><span class="pre">connection</span></tt> sub-package now supports digest authentication.</li>
<li>Process objects are now given randomly generated 'inheritable'

@@ -398,3 +460,3 @@ authentication keys.</li>

using the same authentication key.</li>
<li>Previously <cite>get_module()</cite> from <cite>_nonforking.py</cite> was seriously messed
<li>Previously <tt class="docutils literal"><span class="pre">get_module()</span></tt> from <tt class="docutils literal"><span class="pre">_nonforking.py</span></tt> was seriously messed
up (though it generally worked). It is a lot saner now.</li>

@@ -407,13 +469,13 @@ <li>Python 2.4 or higher is now required.</li>

<ul class="simple">
<li>The <cite>doc</cite> folder contains HTML documentation.</li>
<li><cite>test</cite> is now a subpackage. Running <cite>processing.test.main()</cite>
<li>The <tt class="docutils literal"><span class="pre">doc</span></tt> folder contains HTML documentation.</li>
<li><tt class="docutils literal"><span class="pre">test</span></tt> is now a subpackage. Running <tt class="docutils literal"><span class="pre">processing.test.main()</span></tt>
will run test scripts using both processes and threads.</li>
<li><cite>nonforking.py</cite> has been renamed <cite>_nonforking.py</cite>.
<cite>manager.py</cite> has been renamed <cite>managers.py</cite>.
<cite>connection.py</cite> has become a sub-package <cite>connection</cite></li>
<li><cite>Listener</cite> and <cite>Client</cite> have been removed from
<cite>processing</cite>, but still exist in <cite>processing.connection</cite>.</li>
<li><tt class="docutils literal"><span class="pre">nonforking.py</span></tt> has been renamed <tt class="docutils literal"><span class="pre">_nonforking.py</span></tt>.
<tt class="docutils literal"><span class="pre">manager.py</span></tt> has been renamed <tt class="docutils literal"><span class="pre">managers.py</span></tt>.
<tt class="docutils literal"><span class="pre">connection.py</span></tt> has become a sub-package <tt class="docutils literal"><span class="pre">connection</span></tt></li>
<li><tt class="docutils literal"><span class="pre">Listener</span></tt> and <tt class="docutils literal"><span class="pre">Client</span></tt> have been removed from
<tt class="docutils literal"><span class="pre">processing</span></tt>, but still exist in <tt class="docutils literal"><span class="pre">processing.connection</span></tt>.</li>
<li>The package is now <em>probably</em> compatible with versions of Python
earlier than 2.4.</li>
<li><cite>set</cite> is no longer a type supported by the default manager type.</li>
<li><tt class="docutils literal"><span class="pre">set</span></tt> is no longer a type supported by the default manager type.</li>
<li>Many more changes.</li>

@@ -425,8 +487,8 @@ </ul>

<ul class="simple">
<li>Fixed bug where the arguments to <cite>processing.Manager()</cite> were passed on
to <cite>processing.manager.DefaultManager()</cite> in the wrong order.</li>
<li><cite>processing.dummy</cite> is now a subpackage of <cite>processing</cite>
<li>Fixed bug where the arguments to <tt class="docutils literal"><span class="pre">processing.Manager()</span></tt> were passed on
to <tt class="docutils literal"><span class="pre">processing.manager.DefaultManager()</span></tt> in the wrong order.</li>
<li><tt class="docutils literal"><span class="pre">processing.dummy</span></tt> is now a subpackage of <tt class="docutils literal"><span class="pre">processing</span></tt>
instead of a module.</li>
<li>Rearranged package so that the <cite>test</cite> folder, <cite>README.txt</cite> and
<cite>CHANGES.txt</cite> are copied when the package is installed.</li>
<li>Rearranged package so that the <tt class="docutils literal"><span class="pre">test</span></tt> folder, <tt class="docutils literal"><span class="pre">README.txt</span></tt> and
<tt class="docutils literal"><span class="pre">CHANGES.txt</span></tt> are copied when the package is installed.</li>
</ul>

@@ -437,10 +499,10 @@ </div>

<ul class="simple">
<li>Fixed bug on windows when the full path of <cite>nonforking.py</cite> contains a
<li>Fixed bug on windows when the full path of <tt class="docutils literal"><span class="pre">nonforking.py</span></tt> contains a
space.</li>
<li>On unix there is no longer a need to make the arguments to the
constructor of <cite>Process</cite> be picklable or for an instance of a
subclass of <cite>Process</cite> to be picklable when you call the start method.</li>
constructor of <tt class="docutils literal"><span class="pre">Process</span></tt> be picklable or for an instance of a
subclass of <tt class="docutils literal"><span class="pre">Process</span></tt> to be picklable when you call the start method.</li>
<li>On unix proxies which a child process inherits from its parent can
be used by the child without any problem, so there is no longer a
need to pass them as arguments to <cite>Process</cite>. (This will never be
need to pass them as arguments to <tt class="docutils literal"><span class="pre">Process</span></tt>. (This will never be
possible on windows.)</li>

@@ -447,0 +509,0 @@ </ul>

@@ -18,3 +18,3 @@ <?xml version="1.0" encoding="utf-8" ?>

<p>Connection objects allow the sending and receiving of picklable
objects or strings. They can be thought of a message oriented
objects or strings. They can be thought of as message oriented
connected sockets.</p>

@@ -33,3 +33,4 @@ <p>Connection objects usually created using <tt class="docutils literal"><span class="pre">processing.Pipe()</span></tt> -- see

<dd>Return an object sent from the other end of the connection
using <tt class="docutils literal"><span class="pre">send()</span></tt>.</dd>
using <tt class="docutils literal"><span class="pre">send()</span></tt>. Raises <tt class="docutils literal"><span class="pre">EOFError</span></tt> if there is nothing left to
receive and the other end was closed.</dd>
<dt><tt class="docutils literal"><span class="pre">fileno()</span></tt></dt>

@@ -42,5 +43,9 @@ <dd>Returns the file descriptor or handle used by the connection.</dd>

</dd>
<dt><tt class="docutils literal"><span class="pre">poll(timeout=0)</span></tt></dt>
<dd>Return whether there is any data available to be read within
<tt class="docutils literal"><span class="pre">timeout</span></tt> seconds.</dd>
<dt><tt class="docutils literal"><span class="pre">poll(timeout=0.0)</span></tt></dt>
<dd><p class="first">Return whether there is any data available to be read within
<tt class="docutils literal"><span class="pre">timeout</span></tt> seconds.</p>
<p>If <tt class="docutils literal"><span class="pre">timeout</span></tt> is <tt class="docutils literal"><span class="pre">None</span></tt> then an infinite timeout is used.</p>
<p class="last">Unlike the other blocking methods on Windows this method can
be interrupted by Ctrl-C.</p>
</dd>
<dt><tt class="docutils literal"><span class="pre">sendbytes(buffer)</span></tt></dt>

@@ -53,7 +58,9 @@ <dd><p class="first">Send byte data from an object supporting the buffer interface

<dd>Return a complete message of byte data sent from the other end
of the connection as a string.</dd>
of the connection as a string. Raises <tt class="docutils literal"><span class="pre">EOFError</span></tt> if there is
nothing left to receive and the other end was closed.</dd>
<dt><tt class="docutils literal"><span class="pre">recvbytes_into(buffer,</span> <span class="pre">offset=0)</span></tt></dt>
<dd><p class="first">Read into <tt class="docutils literal"><span class="pre">buffer</span></tt> at position <tt class="docutils literal"><span class="pre">offset</span></tt> a complete message of
byte data sent from the other end of the connection and return
the number of bytes in the message.</p>
the number of bytes in the message. Raises <tt class="docutils literal"><span class="pre">EOFError</span></tt> if
there is nothing left to receive and the other end was closed.</p>
<p><tt class="docutils literal"><span class="pre">buffer</span></tt> must be an object satisfying the writable buffer

@@ -63,4 +70,4 @@ interface and <tt class="docutils literal"><span class="pre">offset</span></tt> must be non-negative and less than

<p class="last">If the buffer is too short then a <tt class="docutils literal"><span class="pre">BufferTooShort</span></tt> exception
is raised and the complete message of bytes data is available
as <tt class="docutils literal"><span class="pre">e.args[0]</span></tt> where <tt class="docutils literal"><span class="pre">e</span></tt> is the exception instance.</p>
is raised and the complete message is available as <tt class="docutils literal"><span class="pre">e.args[0]</span></tt>
where <tt class="docutils literal"><span class="pre">e</span></tt> is the exception instance.</p>
</dd>

@@ -67,0 +74,0 @@ </dl>

@@ -8,3 +8,3 @@ .. include:: header.txt

Connection objects allow the sending and receiving of picklable
objects or strings. They can be thought of a message oriented
objects or strings. They can be thought of as message oriented
connected sockets.

@@ -25,3 +25,4 @@

Return an object sent from the other end of the connection
using `send()`.
using `send()`. Raises `EOFError` if there is nothing left to
receive and the other end was closed.

@@ -37,6 +38,11 @@ `fileno()`

`poll(timeout=0)`
`poll(timeout=0.0)`
Return whether there is any data available to be read within
`timeout` seconds.
`timeout` seconds.
If `timeout` is `None` then an infinite timeout is used.
Unlike the other blocking methods on Windows this method can
be interrupted by Ctrl-C.
`sendbytes(buffer)`

@@ -50,3 +56,4 @@ Send byte data from an object supporting the buffer interface

Return a complete message of byte data sent from the other end
of the connection as a string.
of the connection as a string. Raises `EOFError` if there is
nothing left to receive and the other end was closed.

@@ -56,3 +63,4 @@ `recvbytes_into(buffer, offset=0)`

byte data sent from the other end of the connection and return
the number of bytes in the message.
the number of bytes in the message. Raises `EOFError` if
there is nothing left to receive and the other end was closed.

@@ -64,4 +72,4 @@ `buffer` must be an object satisfying the writable buffer

If the buffer is too short then a `BufferTooShort` exception
is raised and the complete message of bytes data is available
as `e.args[0]` where `e` is the exception instance.
is raised and the complete message is available as `e.args[0]`
where `e` is the exception instance.

@@ -94,3 +102,3 @@

which sent the message.
Therefore, unless the connection object was produced using

@@ -100,4 +108,4 @@ `Pipe()` you should only use the `recv()` and `send()` methods

keys <connection-ref.html#authentication-keys>`_.
.. warning::

@@ -104,0 +112,0 @@

@@ -129,3 +129,3 @@ <?xml version="1.0" encoding="utf-8" ?>

<p class="last">This is called automatically when the listener is garbage
collected.</p>
collected. However it is advisable to call it explicitly.</p>
</dd>

@@ -132,0 +132,0 @@ </dl>

@@ -135,3 +135,3 @@ .. include:: header.txt

This is called automatically when the listener is garbage
collected.
collected. However it is advisable to call it explicitly.

@@ -138,0 +138,0 @@

/*
:Author: David Goodger
:Contact: goodger@users.sourceforge.net
:Date: $Date: 2007/02/01 04:08:29 $
:Revision: $Revision: 1.1 $
:Date: $Date: 2008/01/29 22:14:02 $
:Revision: $Revision: 1.1.1.1 $
:Copyright: This stylesheet has been placed in the public domain.

@@ -7,0 +7,0 @@

@@ -7,3 +7,3 @@ <?xml version="1.0" encoding="utf-8" ?>

<meta name="generator" content="Docutils 0.4: http://docutils.sourceforge.net/" />
<title>Documentation for processing-0.40</title>
<title>Documentation for processing-0.50</title>
<meta name="author" content="R Oudkerk" />

@@ -18,3 +18,3 @@ <link rel="stylesheet" href="html4css1.css" type="text/css" />

<div class="document" id="documentation-for-processing-version">
<h1 class="title">Documentation for processing-0.40</h1>
<h1 class="title">Documentation for processing-0.50</h1>
<table class="docinfo" frame="void" rules="none">

@@ -56,3 +56,3 @@ <col class="docinfo-name" />

</li>
<li><p class="first"><a class="reference" href="tests.html">Tests</a></p>
<li><p class="first"><a class="reference" href="tests.html">Tests and examples</a></p>
</li>

@@ -59,0 +59,0 @@ </ul>

@@ -30,3 +30,3 @@ .. include:: header.txt

* `Tests <tests.html>`_
* `Tests and examples <tests.html>`_

@@ -33,0 +33,0 @@

@@ -13,7 +13,11 @@ <?xml version="1.0" encoding="utf-8" ?>

<h1 class="title">Installation of processing</h1>
<p>Versions earlier than Python 2.4 are not supported.</p>
<p>If you are using Windows then binary builds for Python 2.4 and Python 2.5
are available at</p>
<p>Versions earlier than Python 2.4 are not supported. If you are using
Python 2.4 then you should install the <tt class="docutils literal"><span class="pre">ctypes</span></tt> package (which comes
automatically with Python 2.5).</p>
<p>Windows binary builds for Python 2.4 and Python 2.5 are available at</p>
<blockquote>
<a class="reference" href="http://pyprocessing.berlios.de">http://pyprocessing.berlios.de</a></blockquote>
<p>or</p>
<blockquote>
<a class="reference" href="http://cheeseshop.python.org/pypi/processing">http://cheeseshop.python.org/pypi/processing</a></blockquote>
<p>Otherwise, if you have the correct C compiler setup then the source

@@ -24,30 +28,30 @@ distribution can be installed the usual way:</p>

</pre>
<p>It should not be necessary to do any editing of <cite>setup.py</cite> if you are
using Windows, Mac OS X or (a recent) Linux. On other unices it may be
necessary to modify the values of the <cite>macros</cite> dictionary or
<cite>libraries</cite> list. The section to modify reads</p>
<p>It should not be necessary to do any editing of <tt class="docutils literal"><span class="pre">setup.py</span></tt> if you are
using Windows, Mac OS X or Linux. On other unices it may be necessary
to modify the values of the <tt class="docutils literal"><span class="pre">macros</span></tt> dictionary or <tt class="docutils literal"><span class="pre">libraries</span></tt> list.
The section to modify reads</p>
<pre class="literal-block">
macros = dict(
# should we include support for posix semaphores?
USE_POSIX_SEMAPHORE=have_feature('SC_SEMAPHORES'),
# does semaphore support lack sem_timedwait()?
NO_SEM_TIMED=0,
)
# linux needs librt - other unices may not
libraries = ['rt']
else:
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=1
)
libraries = ['rt']
</pre>
<p>Note that if support for posix semaphores has not been compiled in
then many of the functions in the <cite>processing</cite> namespace like
<cite>Lock()</cite>, <cite>Queue()</cite> or <cite>LocalManager()</cite> will be available. However,
one can still create a manager using <cite>manager = processing.Manager()</cite>
and then do <cite>lock = manager.Lock()</cite> etc.</p>
<p>More details can be found in the comments in <tt class="docutils literal"><span class="pre">setup.py</span></tt>.</p>
<p>Note that if you use <tt class="docutils literal"><span class="pre">HAVE_SEM_OPEN=0</span></tt> then support for posix
semaphores will not be compiled in, and then many of the functions
in the <tt class="docutils literal"><span class="pre">processing</span></tt> namespace like <tt class="docutils literal"><span class="pre">Lock()</span></tt> or <tt class="docutils literal"><span class="pre">Queue()</span></tt> will not be
available. However, one can still create a manager using <tt class="docutils literal"><span class="pre">manager</span> <span class="pre">=</span>
<span class="pre">processing.Manager()</span></tt> and then do <tt class="docutils literal"><span class="pre">lock</span> <span class="pre">=</span> <span class="pre">manager.Lock()</span></tt> etc.</p>
<div class="section">
<h1><a id="running-test-scripts" name="running-test-scripts">Running test scripts</a></h1>
<h1><a id="running-tests" name="running-tests">Running tests</a></h1>
<p>To run the test scripts using Python 2.5 do</p>
<pre class="literal-block">
python -m processing.test
python -m processing.tests
</pre>
<p>and on Python 2.4 do</p>
<pre class="literal-block">
python -c &quot;from processing.test import main; main()&quot;
python -c &quot;from processing.tests import main; main()&quot;
</pre>

@@ -54,0 +58,0 @@ <p>This will run a number of test scripts using both processes and threads.</p>

@@ -85,3 +85,3 @@ <?xml version="1.0" encoding="utf-8" ?>

if __name__ == '__main__':
p = Process(target=f, args=['bob'])
p = Process(target=f, args=('bob',))
p.start()

@@ -110,3 +110,3 @@ p.join()

q = Queue()
p = Process(target=f, args=[q])
p = Process(target=f, args=(q,))
p.start()

@@ -126,7 +126,8 @@ print q.get() # prints &quot;[42, None, 'hello']&quot;

def f(conn):
conn.send([42, None, 'hello'])
conn.send([42, None, 'hello'])
conn.close()
if __name__ == '__main__':
parent_conn, child_conn = Pipe()
p = Process(target=f, args=[child_conn])
p = Process(target=f, args=(child_conn,))
p.start()

@@ -136,9 +137,9 @@ print parent_conn.recv() # prints &quot;[42, None, 'hello']&quot;

</pre>
<p class="last">The two connection objects returned by <tt class="docutils literal"><span class="pre">Pipe()</span></tt> represent the
two ends of the pipe. Each connection object has <tt class="docutils literal"><span class="pre">send()</span></tt> and
<p class="last">The two connection objects returned by <tt class="docutils literal"><span class="pre">Pipe()</span></tt> represent the two
ends of the pipe. Each connection object has <tt class="docutils literal"><span class="pre">send()</span></tt> and
<tt class="docutils literal"><span class="pre">recv()</span></tt> methods (among others). Note that data in a pipe may
become corrupted if two processes (or threads) try to
read/write to the <em>same</em> end of the pipe at the same time. Of
course there is no risk of corruption from processes using
different ends of the pipe at the same time. See <a class="reference" href="processing-ref.html#pipes-and-queues">Pipes</a>.</p>
become corrupted if two processes (or threads) try to read from or
write to the <em>same</em> end of the pipe at the same time. Of course
there is no risk of corruption from processes using different ends
of the pipe at the same time. See <a class="reference" href="processing-ref.html#pipes-and-queues">Pipes</a>.</p>
</dd>

@@ -164,3 +165,3 @@ </dl>

for num in range(10):
Process(target=f, args=[lock, num]).start()
Process(target=f, args=(lock, num)).start()
</pre>

@@ -172,19 +173,16 @@ <p>Without using the lock output from the different processes is liable

<h1><a id="sharing-state-between-processes" name="sharing-state-between-processes">Sharing state between processes</a></h1>
<p>As mentioned above, when doing threaded programming it is usually best
to avoid using shared state as far as possible. This is particularly
true when using multiple processes.</p>
<p>However, if you really do need to use some shared data then you can
create by using a <em>manager</em>. Managers can store their data in one of
two ways:</p>
<p>As mentioned above, when doing concurrent programming it is usually
best to avoid using shared state as far as possible. This is
particularly true when using multiple processes.</p>
<p>However, if you really do need to use some shared data then
<tt class="docutils literal"><span class="pre">processing</span></tt> provides a couple of ways of doing so.</p>
<dl class="docutils">
<dt><strong>Shared memory</strong>:</dt>
<dd><p class="first">Data can be stored in a shared memory map managed by an instance of
<tt class="docutils literal"><span class="pre">LocalManager</span></tt>. Only those data types supported by the <tt class="docutils literal"><span class="pre">struct</span></tt>
and <tt class="docutils literal"><span class="pre">array</span></tt> modules are supported. For example the following code</p>
<dd><p class="first">Data can be stored in a shared memory map using <tt class="docutils literal"><span class="pre">Value</span></tt> or <tt class="docutils literal"><span class="pre">Array</span></tt>.
For example the following code</p>
<pre class="literal-block">
from processing import Process, LocalManager
from processing import Process, Value, Array
def f(n, s, a):
n.value = 42
s.value = (0.75, 'hello')
def f(n, a):
n.value = 3.1415927
for i in range(len(a)):

@@ -194,42 +192,33 @@ a[i] = -a[i]

if __name__ == '__main__':
manager = LocalManager()
num = Value('d', 0.0)
arr = Array('i', range(10))
number = manager.SharedValue('i', 0)
struct = manager.SharedStruct('d256p', (0.0, ''))
array = manager.SharedArray('i', range(10))
p = Process(target=f, args=[number, struct, array])
p = Process(target=f, args=(num, arr))
p.start()
p.join()
print number
print struct
print array
print num.value
print arr[:]
</pre>
<p>will print</p>
<pre class="literal-block">
SharedValue('i', 42)
SharedStruct('d256p', (0.75, 'hello'))
SharedArray('i', [0, -1, -2, -3, -4, -5, -6, -7, -8, -9])
3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]
</pre>
<p>Note that <tt class="docutils literal"><span class="pre">SharedValue</span></tt>, <tt class="docutils literal"><span class="pre">SharedStruct</span></tt> and <tt class="docutils literal"><span class="pre">SharedArray</span></tt> objects
are thread and process safe. The <tt class="docutils literal"><span class="pre">'i'</span></tt> and <tt class="docutils literal"><span class="pre">'d256p'</span></tt> arguments
used when creating <tt class="docutils literal"><span class="pre">num</span></tt>, <tt class="docutils literal"><span class="pre">struct</span></tt> and <tt class="docutils literal"><span class="pre">array</span></tt> are format strings
of the type used by the <tt class="docutils literal"><span class="pre">struct</span></tt> and <tt class="docutils literal"><span class="pre">array</span></tt> modules: <tt class="docutils literal"><span class="pre">'i'</span></tt>
indicates a signed integer, <tt class="docutils literal"><span class="pre">'d'</span></tt> indicates a double precision
float and <tt class="docutils literal"><span class="pre">'256p'</span></tt> indicates a string of length less than 256. See
<a class="reference" href="manager-objects.html#shared-memory-managers">Shared memory managers</a>.</p>
<p class="last">A faster alternative to <tt class="docutils literal"><span class="pre">LocalManager</span></tt> is the <tt class="docutils literal"><span class="pre">processing.sharedctypes</span></tt>
module which supports the creation of <a class="reference" href="sharedctypes.html">ctypes objects allocated from
shared memory</a>. However, ctypes objects are not
&quot;process safe&quot;, so one must synchronize access to them.</p>
<p>The <tt class="docutils literal"><span class="pre">'d'</span></tt> and <tt class="docutils literal"><span class="pre">'i'</span></tt> arguments used when creating <tt class="docutils literal"><span class="pre">num</span></tt> and <tt class="docutils literal"><span class="pre">arr</span></tt>
are typecodes of the kind used by the <tt class="docutils literal"><span class="pre">array</span></tt> module: <tt class="docutils literal"><span class="pre">'d'</span></tt>
indicates a double precision float and <tt class="docutils literal"><span class="pre">'i'</span></tt> indicates a signed
integer. These shared objects will be process and thread safe.</p>
<p class="last">For more flexibility in using shared memory one can use the
<tt class="docutils literal"><span class="pre">processing.sharedctypes</span></tt> module which supports the creation of
arbitrary <a class="reference" href="sharedctypes.html">ctypes objects allocated from shared memory</a>.</p>
</dd>
<dt><strong>Server process</strong>:</dt>
<dd><p class="first">A manager object returned by <tt class="docutils literal"><span class="pre">processing.Manager()</span></tt>
represents a server process which holds python objects and allows
other processes to manipulate them using proxies.</p>
<p>In addition to <tt class="docutils literal"><span class="pre">SharedValue</span></tt>, <tt class="docutils literal"><span class="pre">SharedStruct</span></tt> and <tt class="docutils literal"><span class="pre">SharedArray</span></tt> a
manager returned by <tt class="docutils literal"><span class="pre">processing.Manager()</span></tt> will support types
<tt class="docutils literal"><span class="pre">list</span></tt>, <tt class="docutils literal"><span class="pre">dict</span></tt>, <tt class="docutils literal"><span class="pre">Namespace</span></tt>, <tt class="docutils literal"><span class="pre">Lock</span></tt>, <tt class="docutils literal"><span class="pre">RLock</span></tt>, <tt class="docutils literal"><span class="pre">Semaphore</span></tt>,
<tt class="docutils literal"><span class="pre">BoundedSemaphore</span></tt>, <tt class="docutils literal"><span class="pre">Condition</span></tt>, <tt class="docutils literal"><span class="pre">Event</span></tt>, <tt class="docutils literal"><span class="pre">Queue</span></tt>. For example:</p>
<dd><p class="first">A manager object returned by <tt class="docutils literal"><span class="pre">Manager()</span></tt> controls a server process
which holds python objects and allows other processes to manipulate
them using proxies.</p>
<p>A manager returned by <tt class="docutils literal"><span class="pre">Manager()</span></tt> will support types <tt class="docutils literal"><span class="pre">list</span></tt>,
<tt class="docutils literal"><span class="pre">dict</span></tt>, <tt class="docutils literal"><span class="pre">Namespace</span></tt>, <tt class="docutils literal"><span class="pre">Lock</span></tt>, <tt class="docutils literal"><span class="pre">RLock</span></tt>, <tt class="docutils literal"><span class="pre">Semaphore</span></tt>,
<tt class="docutils literal"><span class="pre">BoundedSemaphore</span></tt>, <tt class="docutils literal"><span class="pre">Condition</span></tt>, <tt class="docutils literal"><span class="pre">Event</span></tt>, <tt class="docutils literal"><span class="pre">Queue</span></tt>, <tt class="docutils literal"><span class="pre">Value</span></tt>
and <tt class="docutils literal"><span class="pre">Array</span></tt>. For example:</p>
<pre class="literal-block">

@@ -250,3 +239,3 @@ from processing import Process, Manager

p = Process(target=f, args=[d, l])
p = Process(target=f, args=(d, l))
p.start()

@@ -265,10 +254,7 @@ p.join()

<a class="reference" href="manager-objects.html#customized-managers">Customized managers</a>.</p>
<p>Proxy objects are picklable and can be transferred between
processes.</p>
<p class="last">Server process managers are more flexible than shared memory
managers because they support more data types (and can be made to
support arbitrary types). Also, a single server process manager
can be shared by different computers over a network. They are,
however, slower than using shared memory. See <a class="reference" href="manager-objects.html#server-process-managers">Server process
managers</a>.</p>
<p class="last">Server process managers are more flexible than using shared memory
objects because they can be made to support arbitrary object types.
Also, a single manager can be shared by processes on different
computers over a network. They are, however, slower than using
shared memory. See <a class="reference" href="manager-objects.html#server-process-managers">Server process managers</a>.</p>
</dd>

@@ -301,3 +287,3 @@ </dl>

2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see
<a class="reference" href="../test/test_speed.py">test_speed.py</a>.</p>
<a class="reference" href="../examples/benchmarks.py">benchmarks.py</a>.</p>
<p><em>Number of 256 byte string objects passed between processes/threads per sec</em>:</p>

@@ -361,3 +347,3 @@ <table border="1" class="docutils">

<tr><td>processing.Lock</td>
<td>430,000</td>
<td>420,000</td>
<td>510,000</td>

@@ -374,3 +360,3 @@ </tr>

<tr><td>processing.RLock</td>
<td>430,000</td>
<td>420,000</td>
<td>500,000</td>

@@ -400,8 +386,8 @@ </tr>

<tr><td>threading.Condition</td>
<td>26,000</td>
<td>28,000</td>
<td>27,000</td>
<td>31,000</td>
</tr>
<tr><td>processing.Condition</td>
<td>22,000</td>
<td>18,000</td>
<td>26,000</td>
<td>25,000</td>
</tr>

@@ -432,9 +418,9 @@ <tr><td>Condition managed by server</td>

</tr>
<tr><td>ctypes shared array</td>
<td>3,600,000</td>
<tr><td>unsynchronized shared array</td>
<td>3,900,000</td>
<td>3,100,000</td>
</tr>
<tr><td>LocalManager shared array</td>
<td>125,000</td>
<td>135,000</td>
<tr><td>synchronized shared array</td>
<td>200,000</td>
<td>220,000</td>
</tr>

@@ -441,0 +427,0 @@ <tr><td>list managed by server</td>

@@ -86,3 +86,3 @@ .. include:: header.txt

if __name__ == '__main__':
p = Process(target=f, args=['bob'])
p = Process(target=f, args=('bob',))
p.start()

@@ -115,3 +115,3 @@ p.join()

q = Queue()
p = Process(target=f, args=[q])
p = Process(target=f, args=(q,))
p.start()

@@ -132,7 +132,8 @@ print q.get() # prints "[42, None, 'hello']"

def f(conn):
conn.send([42, None, 'hello'])
conn.send([42, None, 'hello'])
conn.close()
if __name__ == '__main__':
parent_conn, child_conn = Pipe()
p = Process(target=f, args=[child_conn])
p = Process(target=f, args=(child_conn,))
p.start()

@@ -142,9 +143,9 @@ print parent_conn.recv() # prints "[42, None, 'hello']"

The two connection objects returned by `Pipe()` represent the
two ends of the pipe. Each connection object has `send()` and
The two connection objects returned by `Pipe()` represent the two
ends of the pipe. Each connection object has `send()` and
`recv()` methods (among others). Note that data in a pipe may
become corrupted if two processes (or threads) try to
read/write to the *same* end of the pipe at the same time. Of
course there is no risk of corruption from processes using
different ends of the pipe at the same time. See `Pipes
become corrupted if two processes (or threads) try to read from or
write to the *same* end of the pipe at the same time. Of course
there is no risk of corruption from processes using different ends
of the pipe at the same time. See `Pipes
<processing-ref.html#pipes-and-queues>`_.

@@ -171,3 +172,3 @@

for num in range(10):
Process(target=f, args=[lock, num]).start()
Process(target=f, args=(lock, num)).start()

@@ -181,20 +182,17 @@ Without using the lock output from the different processes is liable

As mentioned above, when doing threaded programming it is usually best
to avoid using shared state as far as possible. This is particularly
true when using multiple processes.
As mentioned above, when doing concurrent programming it is usually
best to avoid using shared state as far as possible. This is
particularly true when using multiple processes.
However, if you really do need to use some shared data then you can
create by using a *manager*. Managers can store their data in one of
two ways:
However, if you really do need to use some shared data then
`processing` provides a couple of ways of doing so.
**Shared memory**:
Data can be stored in a shared memory map managed by an instance of
`LocalManager`. Only those data types supported by the `struct`
and `array` modules are supported. For example the following code ::
Data can be stored in a shared memory map using `Value` or `Array`.
For example the following code ::
from processing import Process, LocalManager
from processing import Process, Value, Array
def f(n, s, a):
n.value = 42
s.value = (0.75, 'hello')
def f(n, a):
n.value = 3.1415927
for i in range(len(a)):

@@ -204,45 +202,37 @@ a[i] = -a[i]

if __name__ == '__main__':
manager = LocalManager()
number = manager.SharedValue('i', 0)
struct = manager.SharedStruct('d256p', (0.0, ''))
array = manager.SharedArray('i', range(10))
num = Value('d', 0.0)
arr = Array('i', range(10))
p = Process(target=f, args=[number, struct, array])
p = Process(target=f, args=(num, arr))
p.start()
p.join()
print number
print struct
print array
print num.value
print arr[:]
will print ::
SharedValue('i', 42)
SharedStruct('d256p', (0.75, 'hello'))
SharedArray('i', [0, -1, -2, -3, -4, -5, -6, -7, -8, -9])
3.1415927
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]
Note that `SharedValue`, `SharedStruct` and `SharedArray` objects
are thread and process safe. The `'i'` and `'d256p'` arguments
used when creating `num`, `struct` and `array` are format strings
of the type used by the `struct` and `array` modules: `'i'`
indicates a signed integer, `'d'` indicates a double precision
float and `'256p'` indicates a string of length less than 256. See
`Shared memory managers <manager-objects.html#shared-memory-managers>`_.
The `'d'` and `'i'` arguments used when creating `num` and `arr`
are typecodes of the kind used by the `array` module: `'d'`
indicates a double precision float and `'i'` indicates a signed
integer. These shared objects will be process and thread safe.
A faster alternative to `LocalManager` is the `processing.sharedctypes`
module which supports the creation of `ctypes objects allocated from
shared memory <sharedctypes.html>`_. However, ctypes objects are not
"process safe", so one must synchronize access to them.
For more flexibility in using shared memory one can use the
`processing.sharedctypes` module which supports the creation of
arbitrary `ctypes objects allocated from shared memory
<sharedctypes.html>`_.
**Server process**:
A manager object returned by `processing.Manager()`
represents a server process which holds python objects and allows
other processes to manipulate them using proxies.
In addition to `SharedValue`, `SharedStruct` and `SharedArray` a
manager returned by `processing.Manager()` will support types
`list`, `dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`,
`BoundedSemaphore`, `Condition`, `Event`, `Queue`. For example::
A manager object returned by `Manager()` controls a server process
which holds python objects and allows other processes to manipulate
them using proxies.
A manager returned by `Manager()` will support types `list`,
`dict`, `Namespace`, `Lock`, `RLock`, `Semaphore`,
`BoundedSemaphore`, `Condition`, `Event`, `Queue`, `Value`
and `Array`. For example::
from processing import Process, Manager

@@ -262,3 +252,3 @@

p = Process(target=f, args=[d, l])
p = Process(target=f, args=(d, l))
p.start()

@@ -278,13 +268,10 @@ p.join()

Proxy objects are picklable and can be transferred between
processes.
Server process managers are more flexible than using shared memory
objects because they can be made to support arbitrary object types.
Also, a single manager can be shared by processes on different
computers over a network. They are, however, slower than using
shared memory. See `Server process managers
<manager-objects.html#server-process-managers>`_.
Server process managers are more flexible than shared memory
managers because they support more data types (and can be made to
support arbitrary types). Also, a single server process manager
can be shared by different computers over a network. They are,
however, slower than using shared memory. See `Server process
managers <manager-objects.html#server-process-managers>`_.
Using a pool of workers

@@ -318,3 +305,3 @@ =======================

2.5Ghz laptop running Windows XP and Ubuntu Linux 6.10 --- see
`test_speed.py <../test/test_speed.py>`_.
`benchmarks.py <../examples/benchmarks.py>`_.

@@ -343,6 +330,6 @@

threading.Lock 850,000 560,000
processing.Lock 430,000 510,000
processing.Lock 420,000 510,000
Lock managed by server 10,000 8,400
threading.RLock 93,000 76,000
processing.RLock 430,000 500,000
processing.RLock 420,000 500,000
RLock managed by server 8,800 7,400

@@ -358,4 +345,4 @@ ============================== ========== ==========

============================== ========== ==========
threading.Condition 26,000 28,000
processing.Condition 22,000 18,000
threading.Condition 27,000 31,000
processing.Condition 26,000 25,000
Condition managed by server 6,600 6,000

@@ -371,4 +358,4 @@ ============================== ========== ==========

list 6,400,000 5,100,000
ctypes shared array 3,600,000 3,100,000
LocalManager shared array 125,000 135,000
unsynchronized shared array 3,900,000 3,100,000
synchronized shared array 200,000 220,000
list managed by server 20,000 17,000

@@ -375,0 +362,0 @@ ============================== ========== ==========

@@ -17,98 +17,10 @@ <?xml version="1.0" encoding="utf-8" ?>

<h1 class="title">Manager objects</h1>
<p>There are two kinds of manager: those that store data in a shared
memory map and those that use a server process.</p>
<div class="section">
<h1><a id="shared-memory-managers" name="shared-memory-managers">Shared memory managers</a></h1>
<p>An instance of <tt class="docutils literal"><span class="pre">LocalManager</span></tt> can be created for sharing data in
shared memory. For creating objects in shared memory it has the
following methods:</p>
<blockquote>
<dl class="docutils">
<dt><tt class="docutils literal"><span class="pre">SharedValue(format,</span> <span class="pre">value)</span></tt></dt>
<dd><p class="first">Creates a shared value object. <tt class="docutils literal"><span class="pre">format</span></tt> must be a format string
of the kind used by the <tt class="docutils literal"><span class="pre">struct</span></tt> module. The format must be for a
struct type which corresponds to a tuple of length 1. The shared
value is initialized with <tt class="docutils literal"><span class="pre">value</span></tt>.</p>
<p>A <tt class="docutils literal"><span class="pre">SharedValue</span></tt> object has a <tt class="docutils literal"><span class="pre">value</span></tt> property for getting or
setting its contents.</p>
<p>For example to create and modify a shared string of length
less than 256 one can do</p>
<pre class="last doctest-block">
&gt;&gt;&gt; manager = LocalManager()
&gt;&gt;&gt; string = manager.SharedValue('256p', 'hello')
&gt;&gt;&gt; print string
SharedValue('256p', 'hello')
&gt;&gt;&gt; string.value = 'goodbye'
&gt;&gt;&gt; print string
SharedValue('256p', 'goodbye')
</pre>
</dd>
<dt><tt class="docutils literal"><span class="pre">SharedStruct(format,</span> <span class="pre">value)</span></tt></dt>
<dd><p class="first">Creates a shared struct object. <tt class="docutils literal"><span class="pre">format</span></tt> must be a format string
of the kind used by the <tt class="docutils literal"><span class="pre">struct</span></tt> module. The format must be for a
struct type with length 1. The shared value is initialized with
<tt class="docutils literal"><span class="pre">value</span></tt> which should be a tuple or list.</p>
<p>A <tt class="docutils literal"><span class="pre">SharedStruct</span></tt> object has a <tt class="docutils literal"><span class="pre">value</span></tt> property for getting or
setting its value. Note that the value will always be a tuple
even if it only has length 1.</p>
<p>For example to create and modify a struct of two short
integers one can do</p>
<pre class="last doctest-block">
&gt;&gt;&gt; manager = LocalManager()
&gt;&gt;&gt; struct = manager.SharedStruct('hh', (0, 0))
&gt;&gt;&gt; print struct
SharedStruct('hh', (0, 0))
&gt;&gt;&gt; string.value = (1, 2)
&gt;&gt;&gt; print struct
SharedStruct('hh', (1, 2))
</pre>
</dd>
<dt><tt class="docutils literal"><span class="pre">SharedArray(format,</span> <span class="pre">value)</span></tt></dt>
<dd><p class="first">Creates a shared array object. <tt class="docutils literal"><span class="pre">format</span></tt> must be a format string
of the kind used by the <tt class="docutils literal"><span class="pre">array</span></tt> module. The shared value is
initialized with <tt class="docutils literal"><span class="pre">sequence</span></tt> which should be an iterable.</p>
<p>A <tt class="docutils literal"><span class="pre">SharedArray</span></tt> object can be used like an <tt class="docutils literal"><span class="pre">array.array</span></tt> object
but it has fixed length and does not support negative indices.</p>
<p>For example to create and modify an array of ten integers one
can do</p>
<pre class="last doctest-block">
&gt;&gt;&gt; manager = LocalManager()
&gt;&gt;&gt; arr = manager.SharedArray('i', [0]*10)
&gt;&gt;&gt; print arr
SharedArray('i', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
&gt;&gt;&gt; arr[:5] = [1, 2, 3, 4, 5]
&gt;&gt;&gt; print arr
SharedArray('i', [1, 2, 3, 4, 5, 0, 0, 0, 0, 0])
&gt;&gt;&gt; print arr[:]
array('i', [1, 2, 3, 4, 5, 0, 0, 0, 0, 0])
&gt;&gt;&gt; print arr.tolist()
[1, 2, 3, 4, 5, 0, 0, 0, 0, 0]
&gt;&gt;&gt; arr.tostring()
'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04 ...
</pre>
</dd>
</dl>
</blockquote>
<p>For compatibility with <tt class="docutils literal"><span class="pre">SyncManager</span></tt> (see below) <tt class="docutils literal"><span class="pre">LocalManager</span></tt> also
has a <tt class="docutils literal"><span class="pre">shutdown()</span></tt> which does nothing and attributes <tt class="docutils literal"><span class="pre">Lock</span></tt>, <tt class="docutils literal"><span class="pre">RLock</span></tt>,
<tt class="docutils literal"><span class="pre">Semaphore</span></tt>, <tt class="docutils literal"><span class="pre">BoundedSemaphore</span></tt>, <tt class="docutils literal"><span class="pre">Condition</span></tt>, <tt class="docutils literal"><span class="pre">Event</span></tt>, <tt class="docutils literal"><span class="pre">Queue</span></tt> which
are just aliases for functions in the <tt class="docutils literal"><span class="pre">processing</span></tt> namespace.
However, unlike a <tt class="docutils literal"><span class="pre">SyncManager</span></tt> object a <tt class="docutils literal"><span class="pre">LocalManager</span></tt> object cannot
be used to create <tt class="docutils literal"><span class="pre">list</span></tt>, <tt class="docutils literal"><span class="pre">dict</span></tt> or <tt class="docutils literal"><span class="pre">Namespace</span></tt> objects.</p>
<p>The space allocated to a <tt class="docutils literal"><span class="pre">SharedValue</span></tt>, <tt class="docutils literal"><span class="pre">SharedStruct</span></tt> or
<tt class="docutils literal"><span class="pre">SharedArray</span></tt> object will be cleaned up when that object is garbage
collected by the process that initially created it. Note, however,
that if it was passed as an argument to the <tt class="docutils literal"><span class="pre">Process()</span></tt> constructor
then it cannot be garbage collected until that process has completed.</p>
</div>
<div class="section">
<h1><a id="server-process-managers" name="server-process-managers">Server process managers</a></h1>
<p>A manager object controls a server process which manages <em>shared
objects</em>. Other processes can access the shared objects by using
proxies.</p>
<p>Manager processes are <em>daemonic</em> so if they are still around when
their parent process exits then it will attempt to kill them off.</p>
<p>The manager classes are defined in the <tt class="docutils literal"><span class="pre">processing.managers</span></tt> module.</p>
<p>Manager processes will be shutdown as soon as they are garbage
collected or their parent process exits. The manager classes are
defined in the <tt class="docutils literal"><span class="pre">processing.managers</span></tt> module.</p>
<div class="section">
<h2><a id="basemanager" name="basemanager">BaseManager</a></h2>
<h1><a id="basemanager" name="basemanager">BaseManager</a></h1>
<p><tt class="docutils literal"><span class="pre">BaseManager</span></tt> is the base class for all manager classes which use a

@@ -158,3 +70,3 @@ server process. It does not possess any methods which create shared

</blockquote>
<p><tt class="docutils literal"><span class="pre">BaseManager</span></tt> instances also has one read-only property:</p>
<p><tt class="docutils literal"><span class="pre">BaseManager</span></tt> instances also have one read-only property:</p>
<blockquote>

@@ -170,9 +82,6 @@ <dl class="docutils">

<div class="section">
<h2><a id="syncmanager" name="syncmanager">SyncManager</a></h2>
<h1><a id="syncmanager" name="syncmanager">SyncManager</a></h1>
<p><tt class="docutils literal"><span class="pre">SyncManager</span></tt> is a subclass of <tt class="docutils literal"><span class="pre">BaseManager</span></tt> which can be used for
the synchronization of processes. Objects of this type are returned
by <tt class="docutils literal"><span class="pre">processing.Manager()</span></tt>.</p>
<p><tt class="docutils literal"><span class="pre">SyncManager</span></tt> support equivalents of all the methods of
<tt class="docutils literal"><span class="pre">LocalManager</span></tt>, but each creates a shared object in the server
process.</p>
<p>It also supports creation of shared lists and dictionaries. The

@@ -211,10 +120,8 @@ instance methods defined by <tt class="docutils literal"><span class="pre">SyncManager</span></tt> are</p>

proxy for it.</dd>
<dt><tt class="docutils literal"><span class="pre">SharedArray(format,</span> <span class="pre">sequence)</span></tt></dt>
<dd>Create a shared <tt class="docutils literal"><span class="pre">SharedArray</span></tt> object and returns a proxy for
<dt><tt class="docutils literal"><span class="pre">Array(typecode,</span> <span class="pre">sequence)</span></tt></dt>
<dd>Create an array and returns a proxy for
it. (<tt class="docutils literal"><span class="pre">format</span></tt> is ignored.)</dd>
<dt><tt class="docutils literal"><span class="pre">SharedStruct(format,</span> <span class="pre">value)</span></tt></dt>
<dd>Create a shared <tt class="docutils literal"><span class="pre">SharedStruct</span></tt> object and returns a proxy for
it. (<tt class="docutils literal"><span class="pre">format</span></tt> is ignored.)</dd>
<dt><tt class="docutils literal"><span class="pre">SharedValue(format,</span> <span class="pre">value)</span></tt></dt>
<dd>Create a shared <tt class="docutils literal"><span class="pre">SharedValue</span></tt> object and returns a proxy for it.</dd>
<dt><tt class="docutils literal"><span class="pre">Value(typecode,</span> <span class="pre">value)</span></tt></dt>
<dd>Create an object with a writable <tt class="docutils literal"><span class="pre">value</span></tt> attribute and returns
a proxy for it.</dd>
<dt><tt class="docutils literal"><span class="pre">dict()</span></tt>, <tt class="docutils literal"><span class="pre">dict(mapping)</span></tt>, <tt class="docutils literal"><span class="pre">dict(sequence)</span></tt></dt>

@@ -227,3 +134,3 @@ <dd>Creates a shared <tt class="docutils literal"><span class="pre">dict</span></tt> object and returns a proxy for it.</dd>

<div class="section">
<h3><a id="namespace-objects" name="namespace-objects">Namespace objects</a></h3>
<h2><a id="namespace-objects" name="namespace-objects">Namespace objects</a></h2>
<p>A namespace object has no public methods but does have writable

@@ -246,3 +153,3 @@ attributes. Its representation shows the values of its attributes.</p>

<div class="section">
<h2><a id="customized-managers" name="customized-managers">Customized managers</a></h2>
<h1><a id="customized-managers" name="customized-managers">Customized managers</a></h1>
<p>To create one's own manager one creates a subclass of <tt class="docutils literal"><span class="pre">BaseManager</span></tt>.</p>

@@ -317,3 +224,3 @@ <p>To create a method of the subclass which will create new shared

<div class="section">
<h3><a id="example" name="example">Example</a></h3>
<h2><a id="example" name="example">Example</a></h2>
<pre class="literal-block">

@@ -339,7 +246,7 @@ from processing.managers import BaseManager, CreatorMethod

</pre>
<p>See <a class="reference" href="../test/test_newtype.py">test_newtype.py</a> for more examples.</p>
<p>See <a class="reference" href="../examples/ex_newtype.py">ex_newtype.py</a> for more examples.</p>
</div>
</div>
<div class="section">
<h2><a id="using-a-remote-manager" name="using-a-remote-manager">Using a remote manager</a></h2>
<h1><a id="using-a-remote-manager" name="using-a-remote-manager">Using a remote manager</a></h1>
<p>It is possible to run a manager server on one machine and have clients

@@ -383,3 +290,2 @@ use it from other machines (assuming that the firewalls involved allow

</div>
</div>
<div class="footer">

@@ -386,0 +292,0 @@ <hr class="footer" />

@@ -7,96 +7,2 @@ .. include:: header.txt

There are two kinds of manager: those that store data in a shared
memory map and those that use a server process.
Shared memory managers
######################
An instance of `LocalManager` can be created for sharing data in
shared memory. For creating objects in shared memory it has the
following methods:
`SharedValue(format, value)`
Creates a shared value object. `format` must be a format string
of the kind used by the `struct` module. The format must be for a
struct type which corresponds to a tuple of length 1. The shared
value is initialized with `value`.
A `SharedValue` object has a `value` property for getting or
setting its contents.
For example to create and modify a shared string of length
less than 256 one can do
>>> manager = LocalManager()
>>> string = manager.SharedValue('256p', 'hello')
>>> print string
SharedValue('256p', 'hello')
>>> string.value = 'goodbye'
>>> print string
SharedValue('256p', 'goodbye')
`SharedStruct(format, value)`
Creates a shared struct object. `format` must be a format string
of the kind used by the `struct` module. The format must be for a
struct type with length 1. The shared value is initialized with
`value` which should be a tuple or list.
A `SharedStruct` object has a `value` property for getting or
setting its value. Note that the value will always be a tuple
even if it only has length 1.
For example to create and modify a struct of two short
integers one can do
>>> manager = LocalManager()
>>> struct = manager.SharedStruct('hh', (0, 0))
>>> print struct
SharedStruct('hh', (0, 0))
>>> string.value = (1, 2)
>>> print struct
SharedStruct('hh', (1, 2))
`SharedArray(format, value)`
Creates a shared array object. `format` must be a format string
of the kind used by the `array` module. The shared value is
initialized with `sequence` which should be an iterable.
A `SharedArray` object can be used like an `array.array` object
but it has fixed length and does not support negative indices.
For example to create and modify an array of ten integers one
can do
>>> manager = LocalManager()
>>> arr = manager.SharedArray('i', [0]*10)
>>> print arr
SharedArray('i', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
>>> arr[:5] = [1, 2, 3, 4, 5]
>>> print arr
SharedArray('i', [1, 2, 3, 4, 5, 0, 0, 0, 0, 0])
>>> print arr[:]
array('i', [1, 2, 3, 4, 5, 0, 0, 0, 0, 0])
>>> print arr.tolist()
[1, 2, 3, 4, 5, 0, 0, 0, 0, 0]
>>> arr.tostring()
'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04 ...
For compatibility with `SyncManager` (see below) `LocalManager` also
has a `shutdown()` which does nothing and attributes `Lock`, `RLock`,
`Semaphore`, `BoundedSemaphore`, `Condition`, `Event`, `Queue` which
are just aliases for functions in the `processing` namespace.
However, unlike a `SyncManager` object a `LocalManager` object cannot
be used to create `list`, `dict` or `Namespace` objects.
The space allocated to a `SharedValue`, `SharedStruct` or
`SharedArray` object will be cleaned up when that object is garbage
collected by the process that initially created it. Note, however,
that if it was passed as an argument to the `Process()` constructor
then it cannot be garbage collected until that process has completed.
Server process managers
#######################
A manager object controls a server process which manages *shared

@@ -106,8 +12,7 @@ objects*. Other processes can access the shared objects by using

Manager processes are *daemonic* so if they are still around when
their parent process exits then it will attempt to kill them off.
Manager processes will be shutdown as soon as they are garbage
collected or their parent process exits. The manager classes are
defined in the `processing.managers` module.
The manager classes are defined in the `processing.managers` module.
BaseManager

@@ -162,9 +67,8 @@ ===========

Stop the process used by the manager. This is only available
if `start()` has been used to start the server process.
if `start()` has been used to start the server process.
This can be called multiple times.
`BaseManager` instances also have one read-only property:
`BaseManager` instances also has one read-only property:
`address`

@@ -185,6 +89,2 @@ The address used by the manager.

`SyncManager` support equivalents of all the methods of
`LocalManager`, but each creates a shared object in the server
process.
It also supports creation of shared lists and dictionaries. The

@@ -230,16 +130,13 @@ instance methods defined by `SyncManager` are

`SharedArray(format, sequence)`
Create a shared `SharedArray` object and returns a proxy for
`Array(typecode, sequence)`
Create an array and returns a proxy for
it. (`format` is ignored.)
`SharedStruct(format, value)`
Create a shared `SharedStruct` object and returns a proxy for
it. (`format` is ignored.)
`Value(typecode, value)`
Create an object with a writable `value` attribute and returns
a proxy for it.
`SharedValue(format, value)`
Create a shared `SharedValue` object and returns a proxy for it.
`dict()`, `dict(mapping)`, `dict(sequence)`
Creates a shared `dict` object and returns a proxy for it.
`list()`, `list(sequence)`

@@ -365,3 +262,3 @@ Creates a shared `list` object and returns a proxy for it.

See `test_newtype.py <../test/test_newtype.py>`_ for more examples.
See `ex_newtype.py <../examples/ex_newtype.py>`_ for more examples.

@@ -368,0 +265,0 @@ Using a remote manager

@@ -20,6 +20,12 @@ <?xml version="1.0" encoding="utf-8" ?>

<dl class="docutils">
<dt><strong>class</strong> <tt class="docutils literal"><span class="pre">Pool(processes=None)</span></tt></dt>
<dt><strong>class</strong> <tt class="docutils literal"><span class="pre">Pool(processes=None,</span> <span class="pre">initializer=None,</span> <span class="pre">initargs=())</span></tt></dt>
<dd><p class="first">A class representing a pool of worker processes.</p>
<p class="last">Tasks can be offloaded to the pool and the results dealt with
<p>Tasks can be offloaded to the pool and the results dealt with
when they become available.</p>
<p>Note that tasks can only be submitted (or retrieved) by the
process which created the pool object.</p>
<p class="last"><tt class="docutils literal"><span class="pre">processes</span></tt> is the number of worker processes to use. If
<tt class="docutils literal"><span class="pre">processes</span></tt> is <tt class="docutils literal"><span class="pre">None</span></tt> then the number returned by <tt class="docutils literal"><span class="pre">cpuCount()</span></tt>
is used. If <tt class="docutils literal"><span class="pre">initializer</span></tt> is not <tt class="docutils literal"><span class="pre">None</span></tt> then each worker
process will call <tt class="docutils literal"><span class="pre">initializer(*initargs)</span></tt> when it starts.</p>
</dd>

@@ -132,3 +138,3 @@ </dl>

result = pool.apply_async(f, [10]) # evaluate &quot;f(10)&quot; asynchronously
result = pool.apply_async(f, (10,)) # evaluate &quot;f(10)&quot; asynchronously
print result.get(timeout=1) # prints &quot;100&quot; unless your computer is *very* slow

@@ -144,6 +150,6 @@

import time
result = pool.apply_async(time.sleep, [10])
result = pool.apply_async(time.sleep, (10,))
print result.get(timeout=1) # raises `TimeoutError`
</pre>
<p>See also <a class="reference" href="../test/test_pool.py">test_pool.py</a>.</p>
<p>See also <a class="reference" href="../examples/ex_pool.py">ex_pool.py</a>.</p>
</div>

@@ -150,0 +156,0 @@ </div>

@@ -9,9 +9,17 @@ .. include:: header.txt

**class** `Pool(processes=None)`
**class** `Pool(processes=None, initializer=None, initargs=())`
A class representing a pool of worker processes.
Tasks can be offloaded to the pool and the results dealt with
when they become available.
when they become available.
Note that tasks can only be submitted (or retrieved) by the
process which created the pool object.
`processes` is the number of worker processes to use. If
`processes` is `None` then the number returned by `cpuCount()`
is used. If `initializer` is not `None` then each worker
process will call `initializer(*initargs)` when it starts.
Pool objects

@@ -133,3 +141,3 @@ ============

result = pool.apply_async(f, [10]) # evaluate "f(10)" asynchronously
result = pool.apply_async(f, (10,)) # evaluate "f(10)" asynchronously
print result.get(timeout=1) # prints "100" unless your computer is *very* slow

@@ -145,3 +153,3 @@

import time
result = pool.apply_async(time.sleep, [10])
result = pool.apply_async(time.sleep, (10,))
print result.get(timeout=1) # raises `TimeoutError`

@@ -151,3 +159,3 @@

See also `test_pool.py <../test/test_pool.py>`_.
See also `ex_pool.py <../examples/ex_pool.py>`_.

@@ -154,0 +162,0 @@ .. _Prev: proxy-objects.html

@@ -113,33 +113,2 @@ <?xml version="1.0" encoding="utf-8" ?>

indicates that the child was terminated by signal <em>N</em>.</dd>
<dt><tt class="docutils literal"><span class="pre">terminate()</span></tt></dt>
<dd>Terminate the process. On Unix this is done using the
<tt class="docutils literal"><span class="pre">SIGTERM</span></tt> signal and on Windows <tt class="docutils literal"><span class="pre">TerminateProcess()</span></tt> is used.
Note that exit handlers and finally clauses etc will not be
executed.</dd>
<dt><tt class="docutils literal"><span class="pre">stop()</span></tt></dt>
<dd><p class="first">If <tt class="docutils literal"><span class="pre">setStoppable()</span></tt> has been used to make the process
stoppable then method causes the <tt class="docutils literal"><span class="pre">processing.ProcessExit</span></tt>
exception to be raised in the child process. This exception
type is a subclass of <tt class="docutils literal"><span class="pre">SystemExit</span></tt>.</p>
<p class="last">Note that if the target process is doing a blocking call then
<tt class="docutils literal"><span class="pre">ProcessExit</span></tt> will only be raised <em>after</em> that call has
completed. Whether the arrival of the stop signal causes the
blocking call to terminate prematurely depends on the call and
on the operating system. For example,
<tt class="docutils literal"><span class="pre">threading.Lock.acquire()</span></tt> ignores any newly arrived signals,
whereas most Unix system calls which block are interruptible.</p>
</dd>
<dt><tt class="docutils literal"><span class="pre">getStoppable()</span></tt></dt>
<dd>Return whether the <tt class="docutils literal"><span class="pre">stop()</span></tt> method is operational for the
process object.</dd>
<dt><tt class="docutils literal"><span class="pre">setStoppable(value)</span></tt></dt>
<dd><p class="first"><tt class="docutils literal"><span class="pre">value</span></tt> is a boolean which determines whether the process
object's <tt class="docutils literal"><span class="pre">stop()</span></tt> method will work after the process is
started -- by default <tt class="docutils literal"><span class="pre">stop()</span></tt> will not work.</p>
<p class="last">When <tt class="docutils literal"><span class="pre">value</span></tt> is true a signal handler will be installed by
the started process, and on Windows the process will use a new
console. On Windows an unfortunate side effect is that
Ctrl-Break can no longer be used to kill all the processes of
the program.</p>
</dd>
<dt><tt class="docutils literal"><span class="pre">getAuthKey()</span></tt></dt>

@@ -156,2 +125,17 @@ <dd><p class="first">Return the process's authentication key (a string).</p>

<dd>Set the process's authentication key which must be a string.</dd>
<dt><tt class="docutils literal"><span class="pre">terminate()</span></tt></dt>
<dd><p class="first">Terminate the process. On Unix this is done using the
<tt class="docutils literal"><span class="pre">SIGTERM</span></tt> signal and on Windows <tt class="docutils literal"><span class="pre">TerminateProcess()</span></tt> is used.
Note that exit handlers and finally clauses etc will not be
executed.</p>
<div class="last warning">
<p class="first admonition-title">Warning</p>
<p class="last">If this method is used when the associated process is
using a pipe or queue then the pipe or queue is liable to
become corrupted and may become unusable by other processes.
Similarly, if the process has acquired a lock or semaphore
etc. then terminating it is liable to cause other
processes to deadlock.</p>
</div>
</dd>
</dl>

@@ -167,24 +151,14 @@ </blockquote>

<pre class="literal-block">
&gt;&gt;&gt; import processing, time, os, signal
&gt;&gt;&gt; p = processing.Process(target=time.sleep, args=[1000])
&gt;&gt;&gt; p.setStoppable(True)
&gt;&gt;&gt; print p
&lt;Process(Process-1, initial)&gt;
&gt;&gt;&gt; import processing, time, signal
&gt;&gt;&gt; p = processing.Process(target=time.sleep, args=(1000,))
&gt;&gt;&gt; print p, p.isAlive()
&lt;Process(Process-1, initial)&gt; False
&gt;&gt;&gt; p.start()
&gt;&gt;&gt; print p
&lt;Process(Process-1, started)&gt;
&gt;&gt;&gt; p.isAlive()
&gt;&gt;&gt; print p, p.isAlive()
&lt;Process(Process-1, started)&gt; True
&gt;&gt;&gt; p.terminate()
&gt;&gt;&gt; print p, p.isAlive()
&lt;Process(Process-1, stopped[SIGTERM])&gt; False
&gt;&gt;&gt; p.getExitCode() == -signal.SIGTERM
True
&gt;&gt;&gt; p.stop()
&gt;&gt;&gt; print p
&lt;Process(Process-1, stopped[ProcessExit])&gt;
&gt;&gt;&gt; p.isAlive()
False
&gt;&gt;&gt; p = processing.Process(target=time.sleep, args=[1000])
&gt;&gt;&gt; p.start()
&gt;&gt;&gt; os.kill(p.getPid(), signal.SIGKILL)
&gt;&gt;&gt; print p
&lt;Process(Process-2, stopped[SIGKILL])&gt;
&gt;&gt;&gt; p.getExitCode() == -signal.SIGKILL
True
</pre>

@@ -191,0 +165,0 @@ </div>

@@ -120,37 +120,2 @@ .. include:: header.txt

`terminate()`
Terminate the process. On Unix this is done using the
`SIGTERM` signal and on Windows `TerminateProcess()` is used.
Note that exit handlers and finally clauses etc will not be
executed.
`stop()`
If `setStoppable()` has been used to make the process
stoppable then method causes the `processing.ProcessExit`
exception to be raised in the child process. This exception
type is a subclass of `SystemExit`.
Note that if the target process is doing a blocking call then
`ProcessExit` will only be raised *after* that call has
completed. Whether the arrival of the stop signal causes the
blocking call to terminate prematurely depends on the call and
on the operating system. For example,
`threading.Lock.acquire()` ignores any newly arrived signals,
whereas most Unix system calls which block are interruptible.
`getStoppable()`
Return whether the `stop()` method is operational for the
process object.
`setStoppable(value)`
`value` is a boolean which determines whether the process
object's `stop()` method will work after the process is
started -- by default `stop()` will not work.
When `value` is true a signal handler will be installed by
the started process, and on Windows the process will use a new
console. On Windows an unfortunate side effect is that
Ctrl-Break can no longer be used to kill all the processes of
the program.
`getAuthKey()`

@@ -171,3 +136,17 @@ Return the process's authentication key (a string).

`terminate()`
Terminate the process. On Unix this is done using the
`SIGTERM` signal and on Windows `TerminateProcess()` is used.
Note that exit handlers and finally clauses etc will not be
executed.
.. warning::
If this method is used when the associated process is
using a pipe or queue then the pipe or queue is liable to
become corrupted and may become unusable by other processes.
Similarly, if the process has acquired a lock or semaphore
etc. then terminating it is liable to cause other
processes to deadlock.
Note that the `start()`, `join()`, `isAlive()` and `getExitCode()`

@@ -183,28 +162,17 @@ methods should only be called by the process that created the process

>>> import processing, time, os, signal
>>> p = processing.Process(target=time.sleep, args=[1000])
>>> p.setStoppable(True)
>>> print p
<Process(Process-1, initial)>
>>> import processing, time, signal
>>> p = processing.Process(target=time.sleep, args=(1000,))
>>> print p, p.isAlive()
<Process(Process-1, initial)> False
>>> p.start()
>>> print p
<Process(Process-1, started)>
>>> p.isAlive()
>>> print p, p.isAlive()
<Process(Process-1, started)> True
>>> p.terminate()
>>> print p, p.isAlive()
<Process(Process-1, stopped[SIGTERM])> False
>>> p.getExitCode() == -signal.SIGTERM
True
>>> p.stop()
>>> print p
<Process(Process-1, stopped[ProcessExit])>
>>> p.isAlive()
False
>>> p = processing.Process(target=time.sleep, args=[1000])
>>> p.start()
>>> os.kill(p.getPid(), signal.SIGKILL)
>>> print p
<Process(Process-2, stopped[SIGKILL])>
>>> p.getExitCode() == -signal.SIGKILL
True
.. _Prev: processing-ref.html
.. _Up: processing-ref.html
.. _Next: queue-objects.html

@@ -27,5 +27,2 @@ <?xml version="1.0" encoding="utf-8" ?>

</dd>
<dt><strong>exception</strong> <tt class="docutils literal"><span class="pre">ProcessExit</span></tt></dt>
<dd>Exception raised in a target process when the <tt class="docutils literal"><span class="pre">Process.stop()</span></tt>
method is used. This is a subclass of <tt class="docutils literal"><span class="pre">SystemExit</span></tt>.</dd>
<dt><strong>exception</strong> <tt class="docutils literal"><span class="pre">BufferTooShort</span></tt></dt>

@@ -52,3 +49,3 @@ <dd><p class="first">Exception raised by the <tt class="docutils literal"><span class="pre">recvbytes_into()</span></tt> method of a

<p>For an example of the usage of queues for interprocess communication
see <a class="reference" href="../test/test_workers.py">test_workers.py</a>.</p>
see <a class="reference" href="../examples/ex_workers.py">ex_workers.py</a>.</p>
<blockquote>

@@ -93,8 +90,3 @@ <dl class="docutils">

<dt><tt class="docutils literal"><span class="pre">Lock()</span></tt></dt>
<dd><p class="first">Returns a non-recursive lock object: a near clone of <tt class="docutils literal"><span class="pre">threading.Lock</span></tt>.</p>
<p class="last">There are two differences from <tt class="docutils literal"><span class="pre">threading.Lock</span></tt>: trying to
acquire a lock already owned by the current thread raises an
exception instead of deadlocking; and trying to release a lock
held by a different thread/process will raise an exception.</p>
</dd>
<dd>Returns a non-recursive lock object: a clone of <tt class="docutils literal"><span class="pre">threading.Lock</span></tt>.</dd>
<dt><tt class="docutils literal"><span class="pre">RLock()</span></tt></dt>

@@ -107,4 +99,58 @@ <dd>Returns a recursive lock object: a clone of <tt class="docutils literal"><span class="pre">threading.RLock</span></tt>.</dd>

</blockquote>
<div class="admonition-acquiring-with-a-timeout admonition">
<p class="first admonition-title">Acquiring with a timeout</p>
<p class="last">The <tt class="docutils literal"><span class="pre">acquire()</span></tt> method of <tt class="docutils literal"><span class="pre">BoundedSemaphore</span></tt>, <tt class="docutils literal"><span class="pre">Lock</span></tt>, <tt class="docutils literal"><span class="pre">RLock</span></tt> and
<tt class="docutils literal"><span class="pre">Semaphore</span></tt> has a timeout parameter not supported by the
equivalents in <tt class="docutils literal"><span class="pre">threading</span></tt>. The signature is <tt class="docutils literal"><span class="pre">acquire(block=True,</span>
<span class="pre">timeout=None)</span></tt> with keyword parameters being acceptable. If
<tt class="docutils literal"><span class="pre">block</span></tt> is true and <tt class="docutils literal"><span class="pre">timeout</span></tt> is not <tt class="docutils literal"><span class="pre">None</span></tt> then it specifies a
timeout in seconds. If <tt class="docutils literal"><span class="pre">block</span></tt> is false then <tt class="docutils literal"><span class="pre">timeout</span></tt> is ignored.</p>
</div>
<div class="admonition-interrupting-the-main-thread admonition">
<p class="first admonition-title">Interrupting the main thread</p>
<p>If the SIGINT signal generated by Ctrl-C arrives while the main
thread is blocked by a call to <tt class="docutils literal"><span class="pre">BoundedSemaphore.acquire()</span></tt>,
<tt class="docutils literal"><span class="pre">Lock.acquire()</span></tt>, <tt class="docutils literal"><span class="pre">RLock.acquire()</span></tt>, <tt class="docutils literal"><span class="pre">Semaphore.acquire()</span></tt> or
<tt class="docutils literal"><span class="pre">Condition.wait()</span></tt> then the call will be immediately interrupted
and <tt class="docutils literal"><span class="pre">KeyboardInterrupt</span></tt> will be raised.</p>
<p class="last">This differs from the behaviour of <tt class="docutils literal"><span class="pre">threading</span></tt> where SIGINT will be
ignored while the equivalent blocking calls are in progress.</p>
</div>
</div>
<div class="section">
<h1><a id="shared-objects" name="shared-objects">Shared Objects</a></h1>
<p>It is possible to create shared objects using shared memory which can
be inherited by child processes.</p>
<blockquote>
<dl class="docutils">
<dt><tt class="docutils literal"><span class="pre">Value(typecode,</span> <span class="pre">value,</span> <span class="pre">lock=True)</span></tt></dt>
<dd><p class="first">Returns a shared object with initial value <tt class="docutils literal"><span class="pre">value</span></tt>. The
type of the object is determined by a one character format
string of the type used by the <tt class="docutils literal"><span class="pre">array</span></tt> module. The object
will have a <tt class="docutils literal"><span class="pre">value</span></tt> attribute which can be used for setting
and getting the value.</p>
<p class="last">If <tt class="docutils literal"><span class="pre">lock</span></tt> is true (the default) then a new lock object is
created to synchronize access to the value. If <tt class="docutils literal"><span class="pre">lock</span></tt> is a
<tt class="docutils literal"><span class="pre">Lock</span></tt> or <tt class="docutils literal"><span class="pre">RLock</span></tt> object then that will be used to synchronize
access to the value. If <tt class="docutils literal"><span class="pre">lock</span></tt> is false then access to the
returned object will not be automatically protected by a lock,
so it will not necessarily be &quot;process-safe&quot;.</p>
</dd>
<dt><tt class="docutils literal"><span class="pre">Array(typecode,</span> <span class="pre">sequence,</span> <span class="pre">lock=True)</span></tt></dt>
<dd><p class="first">Returns a shared array whose length and initial contents is
determined by <tt class="docutils literal"><span class="pre">sequence</span></tt>. The type of the object is
determined by a one character format string of the type used
by the <tt class="docutils literal"><span class="pre">array</span></tt> module.</p>
<p class="last">If <tt class="docutils literal"><span class="pre">lock</span></tt> is true (the default) then a new lock object is
created to synchronize access to the array. If <tt class="docutils literal"><span class="pre">lock</span></tt> is a
<tt class="docutils literal"><span class="pre">Lock</span></tt> or <tt class="docutils literal"><span class="pre">RLock</span></tt> object then that will be used to synchronize
access to the array. If <tt class="docutils literal"><span class="pre">lock</span></tt> is false then access to the
returned object will not be automatically protected by a lock,
so it will not necessarily be &quot;process-safe&quot;.</p>
</dd>
</dl>
</blockquote>
<p>Some more flexibility is possible by using the <a class="reference" href="sharedctypes.html">sharedctypes</a> module instead.</p>
</div>
<div class="section">
<h1><a id="managers" name="managers">Managers</a></h1>

@@ -115,10 +161,2 @@ <p>Managers provide a way to create data which can be shared between

<dl class="docutils">
<dt><tt class="docutils literal"><span class="pre">LocalManager()</span></tt></dt>
<dd><p class="first">Returns a manager object which uses shared memory instead of a
server process. It has instance methods</p>
<blockquote>
<tt class="docutils literal"><span class="pre">SharedValue</span></tt>, <tt class="docutils literal"><span class="pre">SharedStruct</span></tt>, <tt class="docutils literal"><span class="pre">SharedArray</span></tt></blockquote>
<p>for creating objects stored in shared memory map.</p>
<p class="last">See <a class="reference" href="manager-objects.html#shared-memory-managers">LocalManager</a>.</p>
</dd>
<dt><tt class="docutils literal"><span class="pre">Manager()</span></tt></dt>

@@ -132,6 +170,5 @@ <dd><p class="first">Returns a started <tt class="docutils literal"><span class="pre">SyncManager</span></tt> object which can be

<blockquote>
<tt class="docutils literal"><span class="pre">list()</span></tt>, <tt class="docutils literal"><span class="pre">dict()</span></tt>, <tt class="docutils literal"><span class="pre">Namespace()</span></tt>,
<tt class="docutils literal"><span class="pre">SharedValue()</span></tt>, <tt class="docutils literal"><span class="pre">SharedStruct()</span></tt>, <tt class="docutils literal"><span class="pre">SharedArray()</span></tt>,
<tt class="docutils literal"><span class="pre">Lock()</span></tt>, <tt class="docutils literal"><span class="pre">RLock()</span></tt>, <tt class="docutils literal"><span class="pre">Semaphore()</span></tt>, <tt class="docutils literal"><span class="pre">BoundedSemaphore()</span></tt>,
<tt class="docutils literal"><span class="pre">Condition()</span></tt>, <tt class="docutils literal"><span class="pre">Event()</span></tt>, <tt class="docutils literal"><span class="pre">Queue()</span></tt>.</blockquote>
<tt class="docutils literal"><span class="pre">list()</span></tt>, <tt class="docutils literal"><span class="pre">dict()</span></tt>, <tt class="docutils literal"><span class="pre">Namespace()</span></tt>, <tt class="docutils literal"><span class="pre">Value()</span></tt>,
<tt class="docutils literal"><span class="pre">Array()</span></tt>, <tt class="docutils literal"><span class="pre">Lock()</span></tt>, <tt class="docutils literal"><span class="pre">RLock()</span></tt>, <tt class="docutils literal"><span class="pre">Semaphore()</span></tt>,
<tt class="docutils literal"><span class="pre">BoundedSemaphore()</span></tt>, <tt class="docutils literal"><span class="pre">Condition()</span></tt>, <tt class="docutils literal"><span class="pre">Event()</span></tt>, <tt class="docutils literal"><span class="pre">Queue()</span></tt>.</blockquote>
<p class="last">See <a class="reference" href="manager-objects.html#sync-manager">SyncManager</a>.</p>

@@ -141,2 +178,4 @@ </dd>

</blockquote>
<p>It is possible to create managers which support other types -- see
<a class="reference" href="manager-objects.html#customized-managers">Customized managers</a>.</p>
</div>

@@ -149,3 +188,3 @@ <div class="section">

<dl class="docutils">
<dt><tt class="docutils literal"><span class="pre">Pool(processes=None)</span></tt></dt>
<dt><tt class="docutils literal"><span class="pre">Pool(processes=None,</span> <span class="pre">initializer=None,</span> <span class="pre">initargs=())</span></tt></dt>
<dd><p class="first">Returns a process pool object which controls a pool of worker

@@ -155,4 +194,6 @@ processes to which jobs can be submitted.</p>

callbacks and has a parallel map implementation.</p>
<p>If <tt class="docutils literal"><span class="pre">processes</span></tt> is <tt class="docutils literal"><span class="pre">None</span></tt> then the number returned by
<tt class="docutils literal"><span class="pre">cpuCount()</span></tt> is used.</p>
<p><tt class="docutils literal"><span class="pre">processes</span></tt> is the number of worker processes to use. If
<tt class="docutils literal"><span class="pre">processes</span></tt> is <tt class="docutils literal"><span class="pre">None</span></tt> then the number returned by <tt class="docutils literal"><span class="pre">cpuCount()</span></tt>
is used. If <tt class="docutils literal"><span class="pre">initializer</span></tt> is not <tt class="docutils literal"><span class="pre">None</span></tt> then each worker
process will call <tt class="docutils literal"><span class="pre">initializer(*initargs)</span></tt> when it starts.</p>
<p class="last">See <a class="reference" href="pool-objects.html">Pool objects</a>.</p>

@@ -202,12 +243,11 @@ </dd>

&gt;&gt;&gt; processing.enableLogging(level=logging.INFO)
&gt;&gt;&gt; processing.getLogger().warn('doomed')
&gt;&gt;&gt; processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
&gt;&gt;&gt; m = processing.Manager()
[INFO/SyncManager-1] process starting up
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-1352-0-r97d0b'
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
&gt;&gt;&gt; del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] running all &quot;atexit&quot; finalizers
[INFO/SyncManager-1] process exiting with `os.exit(0)`
[INFO/SyncManager-1] manager exiting with exitcode 0
</pre>

@@ -228,3 +268,3 @@ </div>

<dt><tt class="docutils literal"><span class="pre">currentProcess()</span></tt></dt>
<dd><p class="first">An analogue of <tt class="docutils literal"><span class="pre">threading.currentThread</span></tt></p>
<dd><p class="first">An analogue of <tt class="docutils literal"><span class="pre">threading.currentThread()</span></tt>.</p>
<p class="last">Returns the object corresponding to the current process.</p>

@@ -242,12 +282,10 @@ </dd>

def f():
print &quot;hello world!&quot;
print 'hello world!'
if __name__ == '__main__':
freezeSupport()
p = Process(target=f)
p.start()
Process(target=f).start()
</pre>
<p>If the <tt class="docutils literal"><span class="pre">freezeSupport()</span></tt> line is missed out then the frozen
executable produced from this module would (on Windows)
recursively create new processes.</p>
<p>If the <tt class="docutils literal"><span class="pre">freezeSupport()</span></tt> line is missed out then trying to run
the frozen executable will raise <tt class="docutils literal"><span class="pre">RuntimeError</span></tt>.</p>
<p class="last">If the module is being run normally by the python interpreter

@@ -254,0 +292,0 @@ then <tt class="docutils literal"><span class="pre">freezeSupport()</span></tt> has no effect.</p>

@@ -19,6 +19,2 @@ .. include:: header.txt

**exception** `ProcessExit`
Exception raised in a target process when the `Process.stop()`
method is used. This is a subclass of `SystemExit`.
**exception** `BufferTooShort`

@@ -48,3 +44,3 @@ Exception raised by the `recvbytes_into()` method of a

For an example of the usage of queues for interprocess communication
see `test_workers.py <../test/test_workers.py>`_.
see `ex_workers.py <../examples/ex_workers.py>`_.

@@ -93,8 +89,3 @@ `Pipe(duplex=True)`

`Lock()`
Returns a non-recursive lock object: a near clone of `threading.Lock`.
There are two differences from `threading.Lock`: trying to
acquire a lock already owned by the current thread raises an
exception instead of deadlocking; and trying to release a lock
held by a different thread/process will raise an exception.
Returns a non-recursive lock object: a clone of `threading.Lock`.

@@ -109,2 +100,61 @@ `RLock()`

.. admonition:: Acquiring with a timeout
The `acquire()` method of `BoundedSemaphore`, `Lock`, `RLock` and
`Semaphore` has a timeout parameter not supported by the
equivalents in `threading`. The signature is `acquire(block=True,
timeout=None)` with keyword parameters being acceptable. If
`block` is true and `timeout` is not `None` then it specifies a
timeout in seconds. If `block` is false then `timeout` is ignored.
.. admonition:: Interrupting the main thread
If the SIGINT signal generated by Ctrl-C arrives while the main
thread is blocked by a call to `BoundedSemaphore.acquire()`,
`Lock.acquire()`, `RLock.acquire()`, `Semaphore.acquire()` or
`Condition.wait()` then the call will be immediately interrupted
and `KeyboardInterrupt` will be raised.
This differs from the behaviour of `threading` where SIGINT will be
ignored while the equivalent blocking calls are in progress.
Shared Objects
--------------
It is possible to create shared objects using shared memory which can
be inherited by child processes.
`Value(typecode, value, lock=True)`
Returns a shared object with initial value `value`. The
type of the object is determined by a one character format
string of the type used by the `array` module. The object
will have a `value` attribute which can be used for setting
and getting the value.
If `lock` is true (the default) then a new lock object is
created to synchronize access to the value. If `lock` is a
`Lock` or `RLock` object then that will be used to synchronize
access to the value. If `lock` is false then access to the
returned object will not be automatically protected by a lock,
so it will not necessarily be "process-safe".
`Array(typecode, sequence, lock=True)`
Returns a shared array whose length and initial contents is
determined by `sequence`. The type of the object is
determined by a one character format string of the type used
by the `array` module.
If `lock` is true (the default) then a new lock object is
created to synchronize access to the array. If `lock` is a
`Lock` or `RLock` object then that will be used to synchronize
access to the array. If `lock` is false then access to the
returned object will not be automatically protected by a lock,
so it will not necessarily be "process-safe".
Some more flexibility is possible by using the `sharedctypes
<sharedctypes.html>`_ module instead.
Managers

@@ -116,13 +166,2 @@ --------

`LocalManager()`
Returns a manager object which uses shared memory instead of a
server process. It has instance methods
`SharedValue`, `SharedStruct`, `SharedArray`
for creating objects stored in shared memory map.
See `LocalManager <manager-objects.html#shared-memory-managers>`_.
`Manager()`

@@ -137,10 +176,12 @@ Returns a started `SyncManager` object which can be

`list()`, `dict()`, `Namespace()`,
`SharedValue()`, `SharedStruct()`, `SharedArray()`,
`Lock()`, `RLock()`, `Semaphore()`, `BoundedSemaphore()`,
`Condition()`, `Event()`, `Queue()`.
`list()`, `dict()`, `Namespace()`, `Value()`,
`Array()`, `Lock()`, `RLock()`, `Semaphore()`,
`BoundedSemaphore()`, `Condition()`, `Event()`, `Queue()`.
See `SyncManager <manager-objects.html#sync-manager>`_.
It is possible to create managers which support other types -- see
`Customized managers <manager-objects.html#customized-managers>`_.
Process Pools

@@ -152,3 +193,3 @@ -------------

`Pool(processes=None)`
`Pool(processes=None, initializer=None, initargs=())`
Returns a process pool object which controls a pool of worker

@@ -160,4 +201,6 @@ processes to which jobs can be submitted.

If `processes` is `None` then the number returned by
`cpuCount()` is used.
`processes` is the number of worker processes to use. If
`processes` is `None` then the number returned by `cpuCount()`
is used. If `initializer` is not `None` then each worker
process will call `initializer(*initargs)` when it starts.

@@ -208,12 +251,11 @@ See `Pool objects <pool-objects.html>`_.

>>> processing.enableLogging(level=logging.INFO)
>>> processing.getLogger().warn('doomed')
>>> processing.getLogger().warning('doomed')
[WARNING/MainProcess] doomed
>>> m = processing.Manager()
[INFO/SyncManager-1] process starting up
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-1352-0-r97d0b'
[INFO/SyncManager-1] child process calling self.run()
[INFO/SyncManager-1] manager bound to '\\\\.\\pipe\\pyc-2776-0-lj0tfa'
>>> del m
[INFO/MainProcess] sending shutdown message to manager
[INFO/SyncManager-1] manager received shutdown message
[INFO/SyncManager-1] running all "atexit" finalizers
[INFO/SyncManager-1] process exiting with `os.exit(0)`
[INFO/SyncManager-1] manager exiting with exitcode 0

@@ -235,3 +277,3 @@

`currentProcess()`
An analogue of `threading.currentThread`
An analogue of `threading.currentThread()`.

@@ -251,8 +293,7 @@ Returns the object corresponding to the current process.

def f():
print "hello world!"
print 'hello world!'
if __name__ == '__main__':
freezeSupport()
p = Process(target=f)
p.start()
Process(target=f).start()

@@ -259,0 +300,0 @@ If the `freezeSupport()` line is missed out then trying to run

@@ -35,12 +35,5 @@ <?xml version="1.0" encoding="utf-8" ?>

protect it with a lock.</p>
<p>Alternatively another copy of the proxy can be created using the
<tt class="docutils literal"><span class="pre">copy.copy()</span></tt> function.</p>
<p class="last">(There is never a problem with different processes using the
'same' proxy.)</p>
</dd>
<dt><em>Catching</em> <tt class="docutils literal"><span class="pre">ProcessExit</span></tt></dt>
<dd>If a <tt class="docutils literal"><span class="pre">ProcessExit</span></tt> exception is raised in a child process
then that process should end (almost) immediately; otherwise the
<tt class="docutils literal"><span class="pre">stop()</span></tt> method will not work as expected. Note that
<tt class="docutils literal"><span class="pre">ProcessExit</span></tt> is a subclass of <tt class="docutils literal"><span class="pre">SystemExit</span></tt>.</dd>
<dt><em>Joining zombie processes</em></dt>

@@ -61,2 +54,39 @@ <dd>On Unix when a process finishes but has not been joined it becomes

elsewhere can inherit it from an ancestor process.</dd>
<dt><em>Avoid terminating processes</em></dt>
<dd><p class="first">Using the <tt class="docutils literal"><span class="pre">terminate()</span></tt> method to stop a process is liable to
cause any shared resources (such as locks, semaphores, pipes and
queues) currently being used by the process to become broken or
unavailable to other processes.</p>
<p class="last">Therefore it is probably best to only consider using <tt class="docutils literal"><span class="pre">terminate()</span></tt>
on processes which never use any shared resources.</p>
</dd>
<dt><em>Joining processes that use queues</em></dt>
<dd><p class="first">Bear in mind that a process that has put items in a queue will
wait before terminating until all the buffered items are fed by
the &quot;feeder&quot; thread to the underlying pipe. (The child process
can call the <tt class="docutils literal"><span class="pre">canceljoin()</span></tt> method of the queue to avoid this
behaviour.)</p>
<p>This means that whenever you use a queue you need to make sure
that all items which have been put on the queue will eventually be
removed before the process is joined. Otherwise you cannot be
sure that processes which have put items on the queue will
terminate. Remember also that non-daemonic processes will be
automatically joined.</p>
<p>An example which will deadlock is the following:</p>
<pre class="literal-block">
from processing import Process, Queue
def f(q):
q.put('X' * 1000000)
if __name__ == '__main__':
queue = Queue()
p = Process(target=f, args=(queue,))
p.start()
p.join() # this deadlocks
obj = queue.get()
</pre>
<p class="last">A fix here would be to swap the last two lines round (or simply
remove the <tt class="docutils literal"><span class="pre">p.join()</span></tt> line).</p>
</dd>
<dt><em>Explicitly pass resources to child processes</em></dt>

@@ -94,3 +124,3 @@ <dd><p class="first">On Unix a child process can make use of a shared resource created

for i in range(10):
Process(target=f, args=[lock]).start()
Process(target=f, args=(lock,)).start()
</pre>

@@ -126,4 +156,3 @@ </dd>

<p>For example, under Windows running the following module would
recursively create new processes until you run out of memory or
get a crash:</p>
fail with a <tt class="docutils literal"><span class="pre">RuntimeError</span></tt>:</p>
<pre class="literal-block">

@@ -138,4 +167,4 @@ from processing import Process

</pre>
<p>Instead one should protect creation of the new process by using
<tt class="docutils literal"><span class="pre">if</span> <span class="pre">__name__</span> <span class="pre">==</span> <span class="pre">'__main__':</span></tt> as follows:</p>
<p>Instead one should protect the &quot;entry point&quot; of the program by
using <tt class="docutils literal"><span class="pre">if</span> <span class="pre">__name__</span> <span class="pre">==</span> <span class="pre">'__main__':</span></tt> as follows:</p>
<pre class="literal-block">

@@ -148,5 +177,8 @@ from processing import Process

if __name__ == '__main__':
freezeSupport()
p = Process(target=foo)
p.start()
</pre>
<p>(The <tt class="docutils literal"><span class="pre">freezeSupport()</span></tt> line can be omitted if the program will
be run normally instead of frozen.)</p>
<p>This allows the newly spawned Python interpreter to safely import

@@ -157,23 +189,2 @@ the module and then run the module's <tt class="docutils literal"><span class="pre">foo()</span></tt> function.</p>

</dd>
<dt><em>Freezing</em>:</dt>
<dd><p class="first">One can produce Windows executables from a python program by using
<tt class="docutils literal"><span class="pre">py2exe</span></tt>, <tt class="docutils literal"><span class="pre">PyInstaller</span></tt>, <tt class="docutils literal"><span class="pre">cx_Freeze</span></tt> etc. However, if the program
uses <tt class="docutils literal"><span class="pre">processing</span></tt> then one needs to call <tt class="docutils literal"><span class="pre">freezeSupport()</span></tt>
immediately after the <tt class="docutils literal"><span class="pre">if</span> <span class="pre">__name__</span> <span class="pre">==</span> <span class="pre">'__main__':</span></tt> line of the
main module. Otherwise one will probably get the same problems
mentioned above concerning <em>Safe importing</em>. For example</p>
<pre class="literal-block">
from processing import Process, freezeSupport
def foo():
print 'hello'
if __name__ == '__main__':
freezeSupport()
p = Process(target=foo)
p.start()
</pre>
<p class="last">Note that calling <tt class="docutils literal"><span class="pre">freezeSupport()</span></tt> at some other point in the
main module is likely to cause problems.</p>
</dd>
</dl>

@@ -180,0 +191,0 @@ </div>

@@ -30,14 +30,5 @@ .. include:: header.txt

Alternatively another copy of the proxy can be created using the
`copy.copy()` function.
(There is never a problem with different processes using the
'same' proxy.)
*Catching* `ProcessExit`
If a `ProcessExit` exception is raised in a child process
then that process should end (almost) immediately; otherwise the
`stop()` method will not work as expected. Note that
`ProcessExit` is a subclass of `SystemExit`.
*Joining zombie processes*

@@ -60,2 +51,42 @@ On Unix when a process finishes but has not been joined it becomes

*Avoid terminating processes*
Using the `terminate()` method to stop a process is liable to
cause any shared resources (such as locks, semaphores, pipes and
queues) currently being used by the process to become broken or
unavailable to other processes.
Therefore it is probably best to only consider using `terminate()`
on processes which never use any shared resources.
*Joining processes that use queues*
Bear in mind that a process that has put items in a queue will
wait before terminating until all the buffered items are fed by
the "feeder" thread to the underlying pipe. (The child process
can call the `canceljoin()` method of the queue to avoid this
behaviour.)
This means that whenever you use a queue you need to make sure
that all items which have been put on the queue will eventually be
removed before the process is joined. Otherwise you cannot be
sure that processes which have put items on the queue will
terminate. Remember also that non-daemonic processes will be
automatically joined.
An example which will deadlock is the following::
from processing import Process, Queue
def f(q):
q.put('X' * 1000000)
if __name__ == '__main__':
queue = Queue()
p = Process(target=f, args=(queue,))
p.start()
p.join() # this deadlocks
obj = queue.get()
A fix here would be to swap the last two lines round (or simply
remove the `p.join()` line).
*Explicitly pass resources to child processes*

@@ -95,3 +126,3 @@ On Unix a child process can make use of a shared resource created

for i in range(10):
Process(target=f, args=[lock]).start()
Process(target=f, args=(lock,)).start()

@@ -98,0 +129,0 @@

@@ -101,3 +101,3 @@ <?xml version="1.0" encoding="utf-8" ?>

<a class="reference" href="#id1">(*)</a> or if the result is an unpicklable iterator then a
proxy for the result.</p>
proxy for the iterator.</p>
<p>If an exception is raised by <a class="reference" href="#id1">(*)</a> then it is re-raised by

@@ -104,0 +104,0 @@ <tt class="docutils literal"><span class="pre">_callmethod()</span></tt>. If some other exception is raised in the

@@ -97,3 +97,3 @@ .. include:: header.txt

`(*)`_ or if the result is an unpicklable iterator then a
proxy for the result.
proxy for the iterator.

@@ -100,0 +100,0 @@ If an exception is raised by `(*)`_ then then is re-raised by

@@ -24,3 +24,3 @@ <?xml version="1.0" encoding="utf-8" ?>

locks/semaphores. When a process first puts an item on the queue
a background thread is started which transfers objects from a
a feeder thread is started which transfers objects from a
buffer into the pipe.</p>

@@ -105,8 +105,22 @@ <p><tt class="docutils literal"><span class="pre">Queue.Queue</span></tt> implements all the methods of <tt class="docutils literal"><span class="pre">Queue.Queue</span></tt> except for

<p class="first admonition-title">Warning</p>
<p class="last">If a process is killed while it is trying to use a <tt class="docutils literal"><span class="pre">Queue</span></tt> or
<tt class="docutils literal"><span class="pre">SimpleQueue</span></tt> then the data in the queue is likely to become
corrupted because it may become impossible to be sure where the
message boundaries lie.</p>
<p class="last">If a process is killed using the <tt class="docutils literal"><span class="pre">terminate()</span></tt> method or
<tt class="docutils literal"><span class="pre">os.kill()</span></tt> while it is trying to use a <tt class="docutils literal"><span class="pre">Queue</span></tt> then the data in
the queue is likely to become corrupted. This may cause any other
processes to get an exception when it tries to use the queue later
on.</p>
</div>
<div class="warning">
<p class="first admonition-title">Warning</p>
<p>As mentioned above, if a child process has put items on a queue
(and it has not used <tt class="docutils literal"><span class="pre">canceljoin()</span></tt>) then that process will not
terminate until all buffered items have been flushed to the pipe.</p>
<p>This means that if you try joining that process you may get a
deadlock unless you are sure that all items which have been put on
the queue have been consumed. Similarly, if the child process is
non-daemonic then the parent process may hang on exit when it tries
to join all its non-daemonic children.</p>
<p class="last">Note that a queue created using a manager does not have this issue.
See <a class="reference" href="programming-guidelines.html">Programming Guidelines</a>.</p>
</div>
</div>
<div class="footer">

@@ -113,0 +127,0 @@ <hr class="footer" />

@@ -14,3 +14,3 @@ .. include:: header.txt

locks/semaphores. When a process first puts an item on the queue
a background thread is started which transfers objects from a
a feeder thread is started which transfers objects from a
buffer into the pipe.

@@ -99,10 +99,28 @@

If a process is killed while it is trying to use a `Queue` or
`SimpleQueue` then the data in the queue is likely to become
corrupted because it may become impossible to be sure where the
message boundaries lie.
If a process is killed using the `terminate()` method or
`os.kill()` while it is trying to use a `Queue` then the data in
the queue is likely to become corrupted. This may cause any other
process to get an exception when it tries to use the queue later
on.
.. warning::
As mentioned above, if a child process has put items on a queue
(and it has not used `canceljoin()`) then that process will not
terminate until all buffered items have been flushed to the pipe.
This means that if you try joining that process you may get a
deadlock unless you are sure that all items which have been put on
the queue have been consumed. Similarly, if the child process is
non-daemonic then the parent process may hang on exit when it tries
to join all its non-daemonic children.
Note that a queue created using a manager does not have this issue.
See `Programming Guidelines <programming-guidelines.html>`_.
.. _Prev: process-objects.html
.. _Up: processing-ref.html
.. _Next: connection-objects.html

@@ -21,21 +21,25 @@ <?xml version="1.0" encoding="utf-8" ?>

the <tt class="docutils literal"><span class="pre">ctypes</span></tt> package.)</p>
<p>Note that access to a ctypes objects is not protected by any lock.
However accessing a ctypes object can be much faster (20+ times
faster) than accessing a synchronized shared object allocated using
<tt class="docutils literal"><span class="pre">LocalManager</span></tt>.</p>
<p>Note that access to a ctypes objects is not protected by any lock, so
it is possible for more than one process to try modifying the value of a
shared ctypes object at the same time. The <tt class="docutils literal"><span class="pre">synchronized()</span></tt> function
can be used to create a synchronized wrapper for the object which does
not suffer from this problem, but access through the wrapper can be a
lot slower.</p>
<p>In simple cases one can use <tt class="docutils literal"><span class="pre">processing.Value()</span></tt> or
<tt class="docutils literal"><span class="pre">processing.Array()</span></tt> instead of using the functions in this module.</p>
<p>The functions in the module are</p>
<blockquote>
<dl class="docutils">
<dt><tt class="docutils literal"><span class="pre">new_value(fmt_or_type,</span> <span class="pre">*args)</span></tt></dt>
<dt><tt class="docutils literal"><span class="pre">Value(typecode_or_type,</span> <span class="pre">*args)</span></tt></dt>
<dd><p class="first">Returns a ctypes object allocated from shared memory.</p>
<p class="last"><tt class="docutils literal"><span class="pre">fmt_or_type</span></tt> determines the type of the returned object: it
is either a ctypes type or a one character string format of
the kind used by the <tt class="docutils literal"><span class="pre">array</span></tt> module. The remaining arguments
are passed on to the constructor for the type.</p>
<p class="last"><tt class="docutils literal"><span class="pre">typecode_or_type</span></tt> determines the type of the returned object:
it is either a ctypes type or a one character typecode of the
kind used by the <tt class="docutils literal"><span class="pre">array</span></tt> module. The remaining arguments are
passed on to the constructor for the type.</p>
</dd>
<dt><tt class="docutils literal"><span class="pre">new_array(fmt_or_type,</span> <span class="pre">size_or_initializer)</span></tt></dt>
<dt><tt class="docutils literal"><span class="pre">Array(typecode_or_type,</span> <span class="pre">size_or_initializer)</span></tt></dt>
<dd><p class="first">Returns a ctypes array allocated from shared memory.</p>
<p class="last"><tt class="docutils literal"><span class="pre">fmt_or_type</span></tt> determines the type of the elements of the
<p><tt class="docutils literal"><span class="pre">typecode_or_type</span></tt> determines the type of the elements of the
returned array: it is either a ctypes type or a one character
string format of the kind used by the <tt class="docutils literal"><span class="pre">array</span></tt> module. If
typecode of the kind used by the <tt class="docutils literal"><span class="pre">array</span></tt> module. If
<tt class="docutils literal"><span class="pre">size_or_initializer</span></tt> is an integer then it determines the

@@ -46,2 +50,5 @@ length of the array, and the array will be initially zeroed.

the array.</p>
<p class="last">Note that an array of <tt class="docutils literal"><span class="pre">ctypes.c_char</span></tt> has <tt class="docutils literal"><span class="pre">value</span></tt> and
<tt class="docutils literal"><span class="pre">rawvalue</span></tt> attributes which allow one to use it to store and
retrieve strings -- see documentation for <tt class="docutils literal"><span class="pre">ctypes</span></tt>.</p>
</dd>

@@ -51,2 +58,11 @@ <dt><tt class="docutils literal"><span class="pre">copy(obj)</span></tt></dt>

a copy of the ctypes object <tt class="docutils literal"><span class="pre">obj</span></tt>.</dd>
<dt><tt class="docutils literal"><span class="pre">synchronized(obj,</span> <span class="pre">lock=None)</span></tt></dt>
<dd><p class="first">Returns a process-safe wrapper object for a ctypes object
which uses <tt class="docutils literal"><span class="pre">lock</span></tt> to synchronize access. If <tt class="docutils literal"><span class="pre">lock</span></tt> is <tt class="docutils literal"><span class="pre">None</span></tt>
then a <tt class="docutils literal"><span class="pre">processing.RLock</span></tt> object is created automatically.</p>
<p class="last">A synchronized wrapper will have two methods in addition to
those of the object it wraps: <tt class="docutils literal"><span class="pre">getobj()</span></tt> returns the wrapped
object and <tt class="docutils literal"><span class="pre">getlock()</span></tt> returns the lock object used for
synchronization.</p>
</dd>
</dl>

@@ -61,28 +77,28 @@ </blockquote>

<colgroup>
<col width="37%" />
<col width="36%" />
<col width="27%" />
<col width="28%" />
<col width="33%" />
<col width="39%" />
</colgroup>
<thead valign="bottom">
<tr><th class="head">sharedctypes using type</th>
<th class="head">sharedctypes using format</th>
<th class="head">ctypes</th>
<tr><th class="head">ctypes</th>
<th class="head">sharedctypes using type</th>
<th class="head">sharedctypes using typecode</th>
</tr>
</thead>
<tbody valign="top">
<tr><td>new_value(c_double, 2.4)</td>
<td>new_value('d', 2.4)</td>
<td>c_double(2.4)</td>
<tr><td>c_double(2.4)</td>
<td>Value(c_double, 2.4)</td>
<td>Value('d', 2.4)</td>
</tr>
<tr><td>new_value(MyStruct, 4, 6)</td>
<tr><td>MyStruct(4, 6)</td>
<td>Value(MyStruct, 4, 6)</td>
<td>&nbsp;</td>
<td>MyStruct(4, 6)</td>
</tr>
<tr><td>new_array(c_short, 7)</td>
<td>new_array('h', 7)</td>
<td>(c_short * 7)()</td>
<tr><td>(c_short * 7)()</td>
<td>Array(c_short, 7)</td>
<td>Array('h', 7)</td>
</tr>
<tr><td>new_array(c_int, (9, 2, 8))</td>
<td>new_array('i', (9, 2, 8))</td>
<td>(c_int * 3)(9, 2, 8)</td>
<tr><td>(c_int * 3)(9, 2, 8)</td>
<td>Array(c_int, (9, 2, 8))</td>
<td>Array('i', (9, 2, 8))</td>
</tr>

@@ -98,3 +114,3 @@ </tbody>

from processing import Process
from processing.sharedctypes import new_value, new_array
from processing.sharedctypes import Value, Array, synchronized
from ctypes import Structure, c_double

@@ -114,7 +130,9 @@

if __name__ == '__main__':
n = new_value('i', 7)
x = new_value('d', 1.0/3.0)
s = new_array('c', 'hello world')
A = new_array(Point, [(1.875, -6.25), (-5.75, 2.0), (2.375, 9.5)])
n = Value('i', 7)
x = Value(ctypes.c_double, 1.0/3.0)
s = Array('c', 'hello world')
A = Array(Point, [(1.875, -6.25), (-5.75, 2.0), (2.375, 9.5)])
x = synchronized(x) # replace x by a synchronized wrapper
p = Process(target=modify, args=(n, x, s, A))

@@ -124,4 +142,4 @@ p.start()

print n
print x
print n.value
print x.value
print s.value

@@ -132,9 +150,18 @@ print [(p.x, p.y) for p in A]

<pre class="literal-block">
c_long(49)
c_double(0.1111111111111111)
49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]
</pre>
<div class="admonition-avoid-sharing-pointers admonition">
<p class="first admonition-title">Avoid sharing pointers</p>
<p class="last">Although it is entirely possible to store a pointer in shared
memory remember that this will refer to a location in the address
space of the <em>current</em> process. However, the pointer is quite
likely to be invalid in the context of a second process and trying
to dereference the pointer from the second process may cause a
crash.</p>
</div>
</div>
</div>
<div class="footer">

@@ -141,0 +168,0 @@ <hr class="footer" />

@@ -10,25 +10,30 @@ .. include:: header.txt

processes. (See the standard library's documentation for details of
the `ctypes` package.)
the `ctypes` package.)
Note that access to a ctypes objects is not protected by any lock.
However accessing a ctypes object can be much faster (20+ times
faster) than accessing a synchronized shared object allocated using
`LocalManager`.
Note that access to a ctypes objects is not protected by any lock, so
it is possible for more than one process to try modifying the value of a
shared ctypes object at the same time. The `synchronized()` function
can be used to create a synchronized wrapper for the object which does
not suffer from this problem, but access through the wrapper can be a
lot slower.
In simple cases one can use `processing.Value()` or
`processing.Array()` instead of using the functions in this module.
The functions in the module are
`new_value(fmt_or_type, *args)`
`Value(typecode_or_type, *args)`
Returns a ctypes object allocated from shared memory.
`fmt_or_type` determines the type of the returned object: it
is either a ctypes type or a one character string format of
the kind used by the `array` module. The remaining arguments
are passed on to the constructor for the type.
`typecode_or_type` determines the type of the returned object:
it is either a ctypes type or a one character typecode of the
kind used by the `array` module. The remaining arguments are
passed on to the constructor for the type.
`new_array(fmt_or_type, size_or_initializer)`
`Array(typecode_or_type, size_or_initializer)`
Returns a ctypes array allocated from shared memory.
`fmt_or_type` determines the type of the elements of the
`typecode_or_type` determines the type of the elements of the
returned array: it is either a ctypes type or a one character
string format of the kind used by the `array` module. If
typecode of the kind used by the `array` module. If
`size_or_initializer` is an integer then it determines the

@@ -40,2 +45,6 @@ length of the array, and the array will be initially zeroed.

Note that an array of `ctypes.c_char` has `value` and
`rawvalue` attributes which allow one to use it to store and
retrieve strings -- see documentation for `ctypes`.
`copy(obj)`

@@ -45,3 +54,13 @@ Returns a ctypes object allocated from shared memory which is

`synchronized(obj, lock=None)`
Returns a process-safe wrapper object for a ctypes object
which uses `lock` to synchronize access. If `lock` is `None`
then a `processing.RLock` object is created automatically.
A synchronized wrapper will have two methods in addition to
those of the object it wraps: `getobj()` returns the wrapped
object and `getlock()` returns the lock object used for
synchronization.
Equivalences

@@ -54,10 +73,10 @@ ============

============================ =========================== ====================
sharedctypes using type sharedctypes using format ctypes
============================ =========================== ====================
new_value(c_double, 2.4) new_value('d', 2.4) c_double(2.4)
new_value(MyStruct, 4, 6) MyStruct(4, 6)
new_array(c_short, 7) new_array('h', 7) (c_short * 7)()
new_array(c_int, (9, 2, 8)) new_array('i', (9, 2, 8)) (c_int * 3)(9, 2, 8)
============================ =========================== ====================
==================== ======================== ============================
ctypes sharedctypes using type sharedctypes using typecode
==================== ======================== ============================
c_double(2.4) Value(c_double, 2.4) Value('d', 2.4)
MyStruct(4, 6) Value(MyStruct, 4, 6)
(c_short * 7)() Array(c_short, 7) Array('h', 7)
(c_int * 3)(9, 2, 8) Array(c_int, (9, 2, 8)) Array('i', (9, 2, 8))
==================== ======================== ============================

@@ -72,3 +91,3 @@

from processing import Process
from processing.sharedctypes import new_value, new_array
from processing.sharedctypes import Value, Array, synchronized
from ctypes import Structure, c_double

@@ -88,7 +107,9 @@

if __name__ == '__main__':
n = new_value('i', 7)
x = new_value('d', 1.0/3.0)
s = new_array('c', 'hello world')
A = new_array(Point, [(1.875, -6.25), (-5.75, 2.0), (2.375, 9.5)])
n = Value('i', 7)
x = Value(ctypes.c_double, 1.0/3.0)
s = Array('c', 'hello world')
A = Array(Point, [(1.875, -6.25), (-5.75, 2.0), (2.375, 9.5)])
x = synchronized(x) # replace x by a synchronized wrapper
p = Process(target=modify, args=(n, x, s, A))

@@ -98,4 +119,4 @@ p.start()

print n
print x
print n.value
print x.value
print s.value

@@ -106,9 +127,19 @@ print [(p.x, p.y) for p in A]

c_long(49)
c_double(0.1111111111111111)
49
0.1111111111111111
HELLO WORLD
[(3.515625, 39.0625), (33.0625, 4.0), (5.640625, 90.25)]
.. admonition:: Avoid sharing pointers
Although it is entirely possible to store a pointer in shared
memory remember that this will refer to a location in the address
space of the *current* process. However, the pointer is quite
likely to be invalid in the context of a second process and trying
to dereference the pointer from the second process may cause a
crash.
.. _Prev: pool-objects.html
.. _Up: processing-ref.html
.. _Next: connection-ref.html

@@ -7,3 +7,3 @@ <?xml version="1.0" encoding="utf-8" ?>

<meta name="generator" content="Docutils 0.4: http://docutils.sourceforge.net/" />
<title>Tests</title>
<title>Tests and Examples</title>
<link rel="stylesheet" href="html4css1.css" type="text/css" />

@@ -16,54 +16,36 @@ </head>

</div>
<div class="document" id="tests">
<h1 class="title">Tests</h1>
<p>The <tt class="docutils literal"><span class="pre">processing</span></tt> package contains a <tt class="docutils literal"><span class="pre">test</span></tt> sub-package which
contains various test modules.</p>
<p>You can do a test run by doing:</p>
<blockquote>
<tt class="docutils literal"><span class="pre">python</span> <span class="pre">-m</span> <span class="pre">processing.test</span></tt></blockquote>
<div class="document" id="tests-and-examples">
<h1 class="title">Tests and Examples</h1>
<p><tt class="docutils literal"><span class="pre">processing</span></tt> contains a <tt class="docutils literal"><span class="pre">test</span></tt> sub-package which contains unit tests
for the package. You can do a test run by doing</p>
<pre class="literal-block">
python -m processing.tests
</pre>
<p>on Python 2.5 or</p>
<pre class="literal-block">
python -c &quot;from processing.tests import main; main()&quot;
</pre>
<p>on Python 2.4. This will run many of the tests using processes,
threads, and processes with a manager.</p>
<p>The <tt class="docutils literal"><span class="pre">example</span></tt> sub-package contains the following modules:</p>
<blockquote>
<tt class="docutils literal"><span class="pre">python</span> <span class="pre">-c</span> <span class="pre">&quot;from</span> <span class="pre">processing.test</span> <span class="pre">import</span> <span class="pre">main;</span> <span class="pre">main()&quot;</span></tt></blockquote>
<p>on Python 2.4.</p>
<p>This will run the tests in the following modules:</p>
<blockquote>
<dl class="docutils">
<dt><a class="reference" href="../test/test_connection.py">test_connection.py</a></dt>
<dd>Test of the connection types used by the package.</dd>
<dt><a class="reference" href="../test/test_doc.py">test_doc.py</a></dt>
<dd>A test of the default manager and proxy objects using <tt class="docutils literal"><span class="pre">doctest</span></tt>.</dd>
<dt><a class="reference" href="../test/test_newtype.py">test_newtype.py</a></dt>
<dt><a class="reference" href="../examples/ex_newtype.py">ex_newtype.py</a></dt>
<dd>Demonstration of how to create and use customized managers
and proxies.</dd>
<dt><a class="reference" href="../test/test_pool.py">test_pool</a></dt>
<dt><a class="reference" href="../examples/ex_pool.py">ex_pool.py</a></dt>
<dd>Test of the <tt class="docutils literal"><span class="pre">Pool</span></tt> class which represents a process pool.</dd>
<dt><a class="reference" href="../test/test_processing.py">test_processing.py</a></dt>
<dd>Test of synchronization types like locks, conditions and queues.
Takes quite a while to run because it uses <tt class="docutils literal"><span class="pre">time.sleep()</span></tt> a lot.</dd>
<dt><a class="reference" href="../test/test_reduction.py">test_reduction.py</a></dt>
<dd>Test that socket objects and connection objects can
be transferred between processes.</dd>
<dt><a class="reference" href="../test/test_stop.py">test_stop.py</a></dt>
<dd>A test of <tt class="docutils literal"><span class="pre">Process.stop()</span></tt> and <tt class="docutils literal"><span class="pre">Process.terminate()</span></tt>.</dd>
<dt><a class="reference" href="../test/test_workers.py">test_workers.py</a></dt>
<dt><a class="reference" href="../examples/ex_synchronize.py">ex_synchronize.py</a></dt>
<dd>Test of synchronization types like locks, conditions and queues.</dd>
<dt><a class="reference" href="../examples/ex_workers.py">ex_workers.py</a></dt>
<dd>A test showing how to use queues to feed tasks to a
collection of worker processes and collect the results.</dd>
</dl>
</blockquote>
<p>In a full test run most of these tests will be run using both
processes and threads (by using the <tt class="docutils literal"><span class="pre">processing.dummy</span></tt> sub-package).</p>
<p>Also included in <tt class="docutils literal"><span class="pre">test/</span></tt> are</p>
<blockquote>
<dl class="docutils">
<dt><a class="reference" href="../test/test_httpserverpool.py">test_httpserverpool.py</a></dt>
<dt><a class="reference" href="../examples/ex_webserver.py">ex_webserver.py</a></dt>
<dd>An example of how a pool of worker processes can each
run a <tt class="docutils literal"><span class="pre">SimpleHTTPServer.HttpServer</span></tt> instance
while sharing a single listening socket.</dd>
<dt><a class="reference" href="../test/test_speed.py">test_speed.py</a></dt>
<dt><a class="reference" href="../examples/benchmarks.py">benchmarks.py</a></dt>
<dd>Some simple benchmarks comparing <tt class="docutils literal"><span class="pre">processing</span></tt> with <tt class="docutils literal"><span class="pre">threading</span></tt>.</dd>
</dl>
</blockquote>
<p>All the modules in the <tt class="docutils literal"><span class="pre">test</span></tt> folder use <tt class="docutils literal"><span class="pre">freezeSupport()</span></tt> so frozen
executables can be produced from them by using <tt class="docutils literal"><span class="pre">py2exe</span></tt>, <tt class="docutils literal"><span class="pre">PyInstaller</span></tt>
or <tt class="docutils literal"><span class="pre">cx_Freeze</span></tt> in the usual way.</p>
</div>

@@ -70,0 +52,0 @@ <div class="footer">

.. include:: header.txt
Tests
=====
Tests and Examples
==================
The `processing` package contains a `test` sub-package which
contains various test modules.
`processing` contains a `test` sub-package which contains unit tests
for the package. You can do a test run by doing ::
You can do a test run by doing:
python -m processing.tests
`python -m processing.test`
on Python 2.5 or ::
on Python 2.5 or
`python -c "from processing.test import main; main()"`
python -c "from processing.tests import main; main()"
on Python 2.4.
on Python 2.4. This will run many of the tests using processes,
threads, and processes with a manager.
This will run the tests in the following modules:
The `example` sub-package contains the following modules:
`test_connection.py <../test/test_connection.py>`_
Test of the connection types used by the package.
`test_doc.py <../test/test_doc.py>`_
A test of the default manager and proxy objects using `doctest`.
`test_newtype.py <../test/test_newtype.py>`_
`ex_newtype.py <../examples/ex_newtype.py>`_
Demonstration of how to create and use customized managers
and proxies.
`test_pool <../test/test_pool.py>`_
`ex_pool.py <../examples/ex_pool.py>`_
Test of the `Pool` class which represents a process pool.
`test_processing.py <../test/test_processing.py>`_
`ex_synchronize.py <../examples/ex_synchronize.py>`_
Test of synchronization types like locks, conditions and queues.
Takes quite a while to run because it uses `time.sleep()` a lot.
`test_reduction.py <../test/test_reduction.py>`_
Test that socket objects and connection objects can
be transferred between processes.
`test_stop.py <../test/test_stop.py>`_
A test of `Process.stop()` and `Process.terminate()`.
`test_workers.py <../test/test_workers.py>`_
`ex_workers.py <../examples/ex_workers.py>`_
A test showing how to use queues to feed tasks to a
collection of worker processes and collect the results.
In a full test run most of these tests will be run using both
processes and threads (by using the `processing.dummy` sub-package).
Also included in `test/` are
`test_httpserverpool.py <../test/test_httpserverpool.py>`_
`ex_webserver.py <../examples/ex_webserver.py>`_
An example of how a pool of worker processes can each

@@ -59,12 +39,7 @@ run a `SimpleHTTPServer.HttpServer` instance

`test_speed.py <../test/test_speed.py>`_
`benchmarks.py <../examples/benchmarks.py>`_
Some simple benchmarks comparing `processing` with `threading`.
All the modules in the `test` folder use `freezeSupport()` so frozen
executables can be produced from them by using `py2exe`, `PyInstaller`
or `cx_Freeze` in the usual way.
.. _Prev: programming-guidelines.html
.. _Up: index.html
.. _Next: tests.html

@@ -15,6 +15,7 @@ <?xml version="1.0" encoding="utf-8" ?>

<blockquote>
Alexey Akimov, Michele Bertoldi, Josiah Carlson, Tim Couper,
Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Charlie Hull,
Richard Jones, Alexy Khrabrov, Gerald John M. Manipon, Kevin
Manley, Paul Rudin, Sandro Tosi, Dominique Wahli.</blockquote>
Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon,
Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann,
Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov,
Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan,
Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.</blockquote>
<p>Sorry if I have forgotten anyone.</p>

@@ -21,0 +22,0 @@ </div>

@@ -1,1 +0,1 @@

.. |version| replace:: 0.40
.. |version| replace:: 0.50

@@ -25,3 +25,3 @@ <?xml version="1.0" encoding="utf-8" ?>

<tr><th class="docinfo-name">Version:</th>
<td>0.40</td></tr>
<td>0.50</td></tr>
<tr class="field"><th class="docinfo-name">Licence:</th><td class="field-body">BSD Licence</td>

@@ -28,0 +28,0 @@ </tr>

@@ -0,1 +1,3 @@

.. default-role:: literal
============================

@@ -5,9 +7,14 @@ Installation of processing

Versions earlier than Python 2.4 are not supported.
Versions earlier than Python 2.4 are not supported. If you are using
Python 2.4 then you should install the `ctypes` package (which comes
automatically with Python 2.5).
If you are using Windows then binary builds for Python 2.4 and Python 2.5
are available at
Windows binary builds for Python 2.4 and Python 2.5 are available at
http://pyprocessing.berlios.de
or
http://cheeseshop.python.org/pypi/processing
Otherwise, if you have the correct C compiler setup then the source

@@ -19,33 +26,34 @@ distribution can be installed the usual way::

It should not be necessary to do any editing of `setup.py` if you are
using Windows, Mac OS X or (a recent) Linux. On other unices it may be
necessary to modify the values of the `macros` dictionary or
`libraries` list. The section to modify reads ::
using Windows, Mac OS X or Linux. On other unices it may be necessary
to modify the values of the `macros` dictionary or `libraries` list.
The section to modify reads ::
macros = dict(
# should we include support for posix semaphores?
USE_POSIX_SEMAPHORE=have_feature('SC_SEMAPHORES'),
# does semaphore support lack sem_timedwait()?
NO_SEM_TIMED=0,
)
# linux needs librt - other unices may not
libraries = ['rt']
else:
macros = dict(
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=1
)
libraries = ['rt']
Note that if support for posix semaphores has not been compiled in
then many of the functions in the `processing` namespace like
`Lock()`, `Queue()` or `LocalManager()` will be available. However,
one can still create a manager using `manager = processing.Manager()`
and then do `lock = manager.Lock()` etc.
More details can be found in the comments in `setup.py`.
Note that if you use `HAVE_SEM_OPEN=0` then support for posix
semaphores will not been compiled in, and then many of the functions
in the `processing` namespace like `Lock()` or `Queue()` will not be
available. However, one can still create a manager using `manager =
processing.Manager()` and then do `lock = manager.Lock()` etc.
Running test scripts
--------------------
Running tests
-------------
To run the test scripts using Python 2.5 do ::
python -m processing.test
python -m processing.tests
and on Python 2.4 do ::
python -c "from processing.test import main; main()"
python -c "from processing.tests import main; main()"
This will run a number of test scripts using both processes and threads.

@@ -6,4 +6,2 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
#
# This package is intended to duplicate the functionality (and much of

@@ -14,34 +12,40 @@ # the API) of threading.py but uses processes instead of threads. A

#
# Communication between processes is achieved using proxies which
# communicate with a manager using sockets (or named pipes).
# Try calling `processing.doc.main()` to read the html documentation in
# in a webbrowser.
#
# An example:
#
# from processing import Process, Queue
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# def f(q):
# for i in range(10):
# q.put(i*i)
# q.put('STOP')
#
# if __name__ == '__main__':
# queue = Queue()
#
# p = Process(target=f, args=[queue])
# p.start()
#
# result = None
# while result != 'STOP':
# result = queue.get()
# print result
#
# p.join()
#
__version__ = '0.40'
__version__ = '0.50'
__all__ = [
'Process', 'currentProcess', 'activeChildren', 'freezeSupport',
'ProcessExit', 'Manager', 'Pipe', 'cpuCount',
'getLogger', 'enableLogging', 'BufferTooShort'
'Manager', 'Pipe', 'cpuCount', 'getLogger', 'enableLogging',
'BufferTooShort'
]

@@ -58,10 +62,16 @@

from process import Process, currentProcess, activeChildren
from process import freezeSupport, ProcessExit
from logger import SUBDEBUG, DEBUG, INFO, SUBWARNING, WARNING, NOTE
HAVE_NATIVE_SEMAPHORE = hasattr(_processing, 'Blocker')
#
#
#
HAVE_NATIVE_SEMAPHORE = hasattr(_processing, 'SemLock')
ORIGINAL_DIR = os.path.abspath(os.getcwd())
#
# Definitions not depending on native semaphores or queues
# Definitions not depending on native semaphores
#
ProcessError = _processing.ProcessError
BufferTooShort = _processing.BufferTooShort

@@ -111,3 +121,3 @@

else:
raise NotImplementedError
raise NotImplementedError, 'cannot determine number of cpus'

@@ -128,16 +138,22 @@ def getLogger():

def freezeSupport():
'''
Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from processing import forking
forking.freezeSupport()
if HAVE_NATIVE_SEMAPHORE:
__all__ += [
'LocalManager', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Queue', 'Pool', 'TimeoutError'
'TimeoutError', 'Lock', 'RLock', 'Semaphore',
'BoundedSemaphore', 'Condition', 'Event', 'Queue', 'Pool',
'Value', 'Array'
]
def LocalManager():
'''
Returns a manager object which allows creation of data in shared memory
'''
from processing.localmanager import LocalManager
return LocalManager()
class TimeoutError(ProcessError):
pass

@@ -193,3 +209,3 @@ def Lock():

def Pool(processes=None):
def Pool(processes=None, initializer=None, initargs=()):
'''

@@ -199,5 +215,28 @@ Returns a process pool object

from processing.pool import Pool
return Pool(processes)
return Pool(processes, initializer, initargs)
class TimeoutError(Exception):
pass
def Value(typecode, value, lock=True):
'''
Returns a shared object
'''
from processing.sharedctypes import Value, synchronized
assert type(typecode) is str
obj = Value(typecode, value)
if lock and not hasattr(lock, 'acquire'):
lock = Lock()
if lock:
obj = synchronized(obj, lock)
return obj
def Array(typecode, sequence, lock=True):
'''
Returns a shared array
'''
from processing.sharedctypes import Array, synchronized
assert type(typecode) is str
obj = Array(typecode, sequence)
if lock and not hasattr(lock, 'acquire'):
lock = Lock()
if lock:
obj = synchronized(obj, lock)
return obj

@@ -6,3 +6,3 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#

@@ -19,13 +19,11 @@

from processing.process import currentProcess
from processing import _processing, currentProcess
from processing.finalize import Finalize
from processing.logger import subdebug
from processing._processing import *
from processing.reduction import connections_are_picklable
try:
import processing.reduction
from processing._processing import win32
except ImportError:
connections_are_picklable = False
else:
connections_are_picklable = True
pass

@@ -64,5 +62,5 @@ #

elif family == 'AF_PIPE':
result = tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
(os.getpid(), _nextid()))
return result
return tempfile.mktemp(
prefix=r'\\.\pipe\pyc-%d-%d-' % (os.getpid(), _nextid())
)
else:

@@ -85,3 +83,3 @@ raise ValueError, 'unrecognized family'

else:
raise ValueError, 'unrecognized address type'
raise ValueError, 'address type of %r unrecognized' % address

@@ -207,3 +205,4 @@ #

s1, s2 = socket.socketpair()
c1, c2 = Connection(s1.fileno()), Connection(s2.fileno())
c1 = _processing.Connection(s1.fileno())
c2 = _processing.Connection(s2.fileno())
s1.close()

@@ -213,5 +212,4 @@ s2.close()

fd1, fd2 = os.pipe()
c1, c2 = Connection(fd1), Connection(fd2)
os.close(fd1)
os.close(fd2)
c1 = _processing.Connection(fd1, duplicate=False)
c2 = _processing.Connection(fd2, duplicate=False)

@@ -228,28 +226,32 @@ return c1, c2

if duplex:
openmode = PIPE_ACCESS_DUPLEX
access = GENERIC_READ | GENERIC_WRITE
openmode = win32.PIPE_ACCESS_DUPLEX
access = win32.GENERIC_READ | win32.GENERIC_WRITE
obsize, ibsize = BUFSIZE, BUFSIZE
else:
openmode = PIPE_ACCESS_INBOUND
access = GENERIC_WRITE
openmode = win32.PIPE_ACCESS_INBOUND
access = win32.GENERIC_WRITE
obsize, ibsize = 0, BUFSIZE
h1 = CreateNamedPipe(
h1 = win32.CreateNamedPipe(
address, openmode,
PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
1, obsize, ibsize, NMPWAIT_WAIT_FOREVER, NULL
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
win32.PIPE_WAIT,
1, obsize, ibsize, win32.NMPWAIT_WAIT_FOREVER, win32.NULL
)
h2 = CreateFile(address, access, 0, NULL, OPEN_EXISTING, 0, NULL)
SetNamedPipeHandleState(h2, PIPE_READMODE_MESSAGE, None, None)
h2 = win32.CreateFile(
address, access, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
)
win32.SetNamedPipeHandleState(
h2, win32.PIPE_READMODE_MESSAGE, None, None
)
try:
ConnectNamedPipe(h1, NULL)
win32.ConnectNamedPipe(h1, win32.NULL)
except WindowsError, e:
if e.args[0] != ERROR_PIPE_CONNECTED:
if e.args[0] != win32.ERROR_PIPE_CONNECTED:
raise
c1 = PipeConnection(h1)
c2 = PipeConnection(h2)
CloseHandle(h1)
CloseHandle(h2)
c1 = _processing.PipeConnection(h1, duplicate=False)
c2 = _processing.PipeConnection(h2, duplicate=False)
return c1, c2

@@ -278,25 +280,21 @@

subdebug('listener has bound address %r', self._address)
subdebug('listener bound to address %r', self._address)
if family == 'AF_UNIX':
self.close = Finalize(
self, SocketListener._finalize_socketlistener,
args=[self._address, self._family, self._socket],
exitpriority=0
self._unlink = Finalize(
self, os.unlink, args=(self._address,), exitpriority=0
)
else:
self._unlink = None
def accept(self):
s, self._last_accepted = self._socket.accept()
conn = Connection(s.fileno())
conn = _processing.Connection(s.fileno())
s.close()
return conn
@staticmethod
def _finalize_socketlistener(address, family, sock):
subdebug('closing listener with address=%r', address)
import os
try:
os.unlink(address)
except OSError:
pass
def close(self):
self._socket.close()
if self._unlink is not None:
self._unlink()

@@ -325,3 +323,3 @@

conn = Connection(s.fileno())
conn = _processing.Connection(s.fileno())
s.close()

@@ -342,7 +340,8 @@ return conn

self._address = address
handle = CreateNamedPipe(
address, PIPE_ACCESS_DUPLEX,
PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
NMPWAIT_WAIT_FOREVER, NULL
handle = win32.CreateNamedPipe(
address, win32.PIPE_ACCESS_DUPLEX,
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
win32.PIPE_WAIT,
win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
win32.NMPWAIT_WAIT_FOREVER, win32.NULL
)

@@ -360,18 +359,17 @@ self._handle_queue = [handle]

def accept(self):
newhandle = win32.CreateNamedPipe(
self._address, win32.PIPE_ACCESS_DUPLEX,
win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
win32.PIPE_WAIT,
win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
win32.NMPWAIT_WAIT_FOREVER, win32.NULL
)
handle = self._handle_queue.pop(0)
self._handle_queue.append(newhandle)
try:
ConnectNamedPipe(handle, NULL)
win32.ConnectNamedPipe(handle, win32.NULL)
except WindowsError, e:
if e.args[0] != ERROR_PIPE_CONNECTED:
if e.args[0] != win32.ERROR_PIPE_CONNECTED:
raise
newhandle = CreateNamedPipe(
self._address, PIPE_ACCESS_DUPLEX,
PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,
PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
NMPWAIT_WAIT_FOREVER, NULL
)
self._handle_queue.append(newhandle)
conn = PipeConnection(handle)
CloseHandle(handle)
return conn
return _processing.PipeConnection(handle, duplicate=False)

@@ -382,3 +380,3 @@ @staticmethod

for handle in queue:
CloseHandle(handle)
win32.CloseHandle(handle)

@@ -393,9 +391,10 @@ def PipeClient(address):

try:
WaitNamedPipe(address, 1000)
h = CreateFile(
address, GENERIC_READ | GENERIC_WRITE,
0, NULL, OPEN_EXISTING, 0, NULL
win32.WaitNamedPipe(address, 1000)
h = win32.CreateFile(
address, win32.GENERIC_READ | win32.GENERIC_WRITE,
0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL
)
except WindowsError, e:
if e.args[0] not in (ERROR_SEM_TIMEOUT, ERROR_PIPE_BUSY):
if e.args[0] not in (win32.ERROR_SEM_TIMEOUT,
win32.ERROR_PIPE_BUSY):
raise

@@ -407,6 +406,6 @@ else:

SetNamedPipeHandleState(h, PIPE_READMODE_MESSAGE, None, None)
conn = PipeConnection(h)
CloseHandle(h)
return conn
win32.SetNamedPipeHandleState(
h, win32.PIPE_READMODE_MESSAGE, None, None
)
return _processing.PipeConnection(h, duplicate=False)

@@ -417,4 +416,2 @@ #

import hmac, sha
class AuthenticationError(Exception):

@@ -424,2 +421,3 @@ pass

def deliver_challenge(connection, authkey):
import hmac, sha
assert type(authkey) is str, '%r is not a string' % authkey

@@ -441,2 +439,3 @@ try:

def answer_challenge(connection, authkey):
import hmac, sha
assert type(authkey) is str, '%r is not a string' % authkey

@@ -451,53 +450,1 @@ message = connection.recvbytes()

raise AuthenticationError, 'digest sent was rejected'
#
#
#
def _test(address):
c = Client(address, authenticate=True)
print 'received: %s' % c.recv()
if __name__ == '__main__':
from processing import Process
obj = ['this', range(10), None]
print 'authkey =', currentProcess().getAuthKey()
for f in families:
print '\nfamily =', f
l = Listener(family=f, authenticate=True)
p = Process(target=_test, args=[l.address])
p.start()
server = l.accept()
print 'sending: %s' % obj
server.send(obj)
p.join()
for duplex in (True, False):
print '\nusing Pipe(duplex=%s)' % duplex
r, w = Pipe(duplex=duplex)
print 'sending: ', obj
w.send(obj)
print 'received:', r.recv()
try:
r.send(1)
except OSError:
pass
else:
print 'expected OSError from r.send(...)'
try:
w.recv()
except OSError:
pass
else:
print 'expected OSError from w.recv(...)'

@@ -6,3 +6,3 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#

@@ -12,5 +12,4 @@

'Process', 'currentProcess', 'activeChildren', 'freezeSupport',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Queue',
'Manager', 'LocalManager', 'Pipe', 'cpuCount'
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'Manager', 'Pipe'
]

@@ -22,26 +21,16 @@

import threading, sys
import threading
import sys
import weakref
import array
from processing.dummy.managers import SyncManager as Manager
from processing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore, \
Condition, Event
from Queue import Queue, Full, Empty
from processing import cpuCount
from Queue import Queue
#
# Compatibility
#
try:
set
except NameError:
from sets import Set as set
#
#
#
LocalManager = Manager
class DummyProcess(threading.Thread):

@@ -57,12 +46,10 @@

self._pid = None
self._children = set()
self._children = weakref.WeakKeyDictionary()
self._parent = currentProcess()
def _Thread__bootstrap(self):
self._pid = threading._get_ident()
threading.Thread._Thread__bootstrap(self)
self._start_called = False
def start(self):
assert self._parent is currentProcess()
self._parent._children.add(self)
self._start_called = True
self._parent._children[self] = None
threading.Thread.start(self)

@@ -73,12 +60,6 @@

if not self.isAlive():
self._parent._children.discard(self)
self._parent._children.pop(self, None)
def getAuthKey(self):
pass
def setAuthKey(self, value):
pass
def getExitCode(self):
if self._Thread__stopped:
if self._start_called and not self.isAlive():
return 0

@@ -88,15 +69,56 @@ else:

def getPid(self):
return self._pid
#
#
#
Process = DummyProcess
currentProcess = threading.currentThread
currentProcess()._children = weakref.WeakKeyDictionary()
currentProcess()._children = set()
def activeChildren():
return list(currentProcess()._children)
children = currentProcess()._children
for p in list(children):
if not p.isAlive():
children.pop(p, None)
return list(children)
def freezeSupport():
pass
#
#
#
class Namespace(object):
def __repr__(self):
items = self.__dict__.items()
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass

@@ -6,3 +6,3 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#

@@ -39,5 +39,5 @@

def Pipe():
def Pipe(duplex=True):
a, b = Queue(), Queue()
return Connection(a,b), Connection(b, a)
return Connection(a, b), Connection(b, a)

@@ -58,7 +58,8 @@

return False
self._in.not_empty.acquire()
self._in.not_empty.wait(timeout)
return self._in.not_empty()
self._in.not_empty.release()
return not self._in.empty()
def close(self):
self._in = self._out = None
del self._in, self._out
pass

@@ -6,3 +6,3 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#

@@ -13,2 +13,3 @@

from processing.logger import subdebug

@@ -18,11 +19,13 @@ __all__ = ['Finalize', '_run_finalizers']

_registry = {}
_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
_registry = {}
_counter = itertools.count()
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
assert callback is not None

@@ -37,5 +40,5 @@ if obj is not None:

self._kwargs = kwargs or {}
self._key = (exitpriority, self._counter.next())
self._key = (exitpriority, _counter.next())
self._registry[self._key] = self
_registry[self._key] = self

@@ -49,8 +52,11 @@ def __call__(self, wr=None):

try:
del self._registry[self._key]
del _registry[self._key]
except KeyError:
return False
subdebug('finalizer no longer registered')
else:
subdebug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = self._kwargs = None
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return True

@@ -63,5 +69,8 @@

try:
del self._registry[self._key]
del _registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None

@@ -72,5 +81,24 @@ def still_active(self):

'''
return self._key in self._registry
return self._key in _registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitprority=' + str(self._key[0])
return x + '>'
def _run_finalizers(minpriority=None):

@@ -86,7 +114,9 @@ '''

else:
f = lambda p : p[0][0] is not None and p[0] >= minpriority
items = filter(f, Finalize._registry.iteritems())
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = filter(f, _registry.items())
items.sort(reverse=True)
for key, finalizer in items:
subdebug('calling %s', finalizer)
try:

@@ -97,41 +127,4 @@ finalizer()

traceback.print_exc()
Finalize._registry.clear()
if __name__ == '__main__':
result = []
class Foo(object):
pass
a = Foo()
Finalize(a, result.append, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = Finalize(b, result.append, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
Finalize(c, result.append, args=('c',))
d10 = Foo()
Finalize(d10, result.append, args=('d10',), exitpriority=1)
d01 = Foo()
Finalize(d01, result.append, args=('d01',), exitpriority=0)
d02 = Foo()
Finalize(d02, result.append, args=('d02',), exitpriority=0)
d03 = Foo()
Finalize(d03, result.append, args=('d03',), exitpriority=0)
_run_finalizers() # triggers callbacks for d10, d03, d02, d01 in that
# order; the callback for c will not be called
expected = ['a', 'b', 'd10', 'd03', 'd02', 'd01']
assert result == expected, '%s != %s' % (result, expected)
print 'got expected result: %s' % result
if minpriority is None:
_registry.clear()
+120
-187

@@ -6,3 +6,3 @@ #

#
# Copyright (c) 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#

@@ -16,6 +16,8 @@

import thread
import struct
import itertools
from processing import _processing, currentProcess
from processing import _processing
from processing.finalize import Finalize
from processing.logger import info
from processing.forking import PicklableOnlyForInheritance

@@ -25,51 +27,56 @@ __all__ = ['BufferWrapper']

#
# Class representing an mmap - can be inherited by child processes
# Inheirtable class which wraps an mmap, and from which blocks can be allocated
#
class MMapWrapper(object):
if sys.platform == 'win32':
__slots__ = ('__data', '__weakref__')
mmap = property(lambda self: self.__data[0])
size = property(lambda self: self.__data[1])
name = property(lambda self: self.__data[2])
def __init__(self, size):
fd, name = tempfile.mkstemp(prefix='pym-')
remaining = size
while remaining > 0:
remaining -= os.write(fd, '\0' * remaining)
mmap_ = mmap.mmap(fd, size)
os.close(fd)
self.__data = (mmap_, size, name)
from processing._processing import win32
if sys.platform in ('win32', 'cygwin'):
Finalize(
self, MMapWrapper._finalize_heap, args=[mmap_, name],
exitpriority=-10
)
else:
os.unlink(name)
class Arena(PicklableOnlyForInheritance):
def __getstate__(self):
assert sys.platform == 'win32'
return (self.name, self.size)
def __setstate__(self, state):
assert getattr(currentProcess(), '_unpickling', False), \
'mmaps should only be shared using process inheritance'
name, size = state
fd = os.open(name, os.O_RDWR | os.O_BINARY, int('0600', 8))
mmap_ = mmap.mmap(fd, size)
os.close(fd)
self.__data = (mmap_, size, name)
@staticmethod
def _finalize_heap(mmap, name):
import os
mmap.close()
os.unlink(name)
_nextid = itertools.count().next
def __init__(self, size):
self.size = size
self.name = 'pym-%d-%d' % (os.getpid(), Arena._nextid())
self.buffer = mmap.mmap(0, self.size, tagname=self.name)
assert win32.GetLastError() == 0, 'tagname already in use'
self._state = (self.size, self.name)
def _setstate(self, state):
self.size, self.name = self._state = state
self.buffer = mmap.mmap(0, self.size, tagname=self.name)
assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
else:
class Arena(PicklableOnlyForInheritance):
def __init__(self, size):
if sys.version_info >= (2, 5, 0):
self.buffer = mmap.mmap(-1, size)
self.size = size
self.name = None
else:
fd, self.name = tempfile.mkstemp(prefix='pym-')
self.size = remaining = size
while remaining > 0:
remaining -= os.write(fd, '\0' * remaining)
self.buffer = mmap.mmap(fd, size)
os.close(fd)
if sys.platform == 'cygwin':
# cannot unlink file until it is no longer in use
def _finalize_heap(mmap, unlink):
mmap.close()
unlink(name)
Finalize(
self, _finalize_heap, args=(self.buffer, os.unlink),
exitpriority=-10
)
else:
os.unlink(name)
#
# Class allowing allocation of (unmovable) chunks of memory from mmaps
# Class allowing allocation of chunks of memory from arenas
#

@@ -79,5 +86,5 @@

_alignment = struct.calcsize('P')
def __init__(self, size=1024):
_alignment = 8
def __init__(self, size=4096):
self._lastpid = os.getpid()

@@ -88,14 +95,11 @@ self._lock = thread.allocate_lock()

self._len_to_seq = {}
self._start_to_location = {}
self._stop_to_location = {}
self._allocated_locations = set()
self._mmaps = []
self._start_to_block = {}
self._stop_to_block = {}
self._allocated_blocks = set()
self._arenas = []
def _roundup(self, n):
n = max(1, n)
q, r = divmod(n, self._alignment)
if r:
q += 1
return q * self._alignment
mask = self._alignment - 1
return (max(1, n) + mask) & ~mask
def _malloc(self, size):

@@ -107,56 +111,57 @@ # returns a large enough block -- it might be much larger

self._size *= 2
mmap = MMapWrapper(length)
self._mmaps.append(mmap)
return (mmap, 0, length)
info('allocating a new mmap of length %d', length)
arena = Arena(length)
self._arenas.append(arena)
return (arena, 0, length)
else:
length = self._lengths[i]
seq = self._len_to_seq[length]
location = seq.pop()
block = seq.pop()
if not seq:
del self._len_to_seq[length], self._lengths[i]
(mmap, start, stop) = location
del self._start_to_location[(mmap, start)]
del self._stop_to_location[(mmap, stop)]
return location
def _free(self, location):
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
return block
def _free(self, block):
# free location and try to merge with neighbours
(mmap, start, stop) = location
(arena, start, stop) = block
try:
prev_location = self._stop_to_location[(mmap, start)]
prev_block = self._stop_to_block[(arena, start)]
except KeyError:
pass
else:
start, _ = self._absorb(prev_location)
start, _ = self._absorb(prev_block)
try:
next_location = self._start_to_location[(mmap, stop)]
next_block = self._start_to_block[(arena, stop)]
except KeyError:
pass
else:
_, stop = self._absorb(next_location)
location = (mmap, start, stop)
_, stop = self._absorb(next_block)
block = (arena, start, stop)
length = stop - start
try:
self._len_to_seq[length].append(location)
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [location]
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)
self._start_to_location[(mmap, start)] = location
self._stop_to_location[(mmap, stop)] = location
def _absorb(self, location):
self._start_to_block[(arena, start)] = block
self._stop_to_block[(arena, stop)] = block
def _absorb(self, block):
# deregister this block so it can be merged with a neighbour
(mmap, start, stop) = location
del self._start_to_location[(mmap, start)]
del self._stop_to_location[(mmap, stop)]
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
length = stop - start
seq = self._len_to_seq[length]
seq.remove(location)
seq.remove(block)
if not seq:

@@ -168,57 +173,29 @@ del self._len_to_seq[length]

def free(self, location):
def free(self, block):
# free a block returned by malloc()
assert os.getpid() == self._lastpid
self._lock.acquire()
try:
self._allocated_locations.remove(location)
self._free(location)
self._allocated_blocks.remove(block)
self._free(block)
finally:
self._lock.release()
def malloc(self, size):
# return a block of right size (possibly rounded up)
if os.getpid() != self._lastpid:
self.__init__() # reinitialize after fork
self._lock.acquire()
try:
if os.getpid() != self._lastpid:
self.__init__() # reinitialize after fork
size = self._roundup(size)
(mmap, start, stop) = self._malloc(size)
(arena, start, stop) = self._malloc(size)
new_stop = start + size
if new_stop < stop:
self._free((mmap, new_stop, stop))
location = (mmap, start, new_stop)
self._allocated_locations.add(location)
return location
self._free((arena, new_stop, stop))
block = (arena, start, new_stop)
self._allocated_blocks.add(block)
return block
finally:
self._lock.release()
def _dump(self):
self._verify(dump=True)
def _verify(self, dump=False):
all = []
occupied = 0
for L in self._len_to_seq.values():
for mmap, start, stop in L:
all.append((self._mmaps.index(mmap), start, stop,
stop-start, 'free'))
for mmap, start, stop in self._allocated_locations:
all.append((self._mmaps.index(mmap), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
if dump:
for line in all:
print '%8s%8s%8s%8s %s' % line
lengths = [len(w.mmap) for w in self._mmaps]
print 'mmap sizes =', lengths
print 'total size =', sum(lengths)
for i in range(len(all)-1):
(mmap, start, stop) = all[i][:3]
(nmmap, nstart, nstop) = all[i+1][:3]
assert ((mmap != nmmap and nstart == 0) or (stop == nstart))
#

@@ -230,65 +207,21 @@ # Class representing a chunk of an mmap -- can be inherited

__slots__ = ('__data', '__weakref__')
_heap = Heap()
location = property(lambda self : self.__data[0])
size = property(lambda self : self.__data[1])
def __init__(self, size):
assert 0 <= size <= sys.maxint
location = BufferWrapper._heap.malloc(size)
self.__setstate__((location, size))
Finalize(self, BufferWrapper._heap.free, args=[self.location])
block = BufferWrapper._heap.malloc(size)
self._state = (block, size)
Finalize(self, BufferWrapper._heap.free, args=(block,))
def getaddress(self):
w, start, stop = self.location
address, length = _processing.address_of_buffer(w.mmap)
assert self.size <= length
(arena, start, stop), size = self._state
address, length = _processing.address_of_buffer(arena.buffer)
assert size <= length
return address + start
def getview(self):
w, start, stop = self.location
return _processing.rwbuffer(w.mmap, start, self.size)
def __getstate__(self):
assert sys.platform == 'win32'
return self.__data
def __setstate__(self, state):
self.__data = state
#
# Test
#
(arena, start, stop), size = self._state
return _processing.rwbuffer(arena.buffer, start, size)
def test():
import random
iterations = 10000
maxblocks = 50
blocks = []
maxsize = 0
occ = 0
maxocc = 0
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = BufferWrapper(size)
occ += size
maxocc = max(maxocc, occ)
maxsize = max(maxsize, size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
occ -= size
BufferWrapper._heap._dump()
print 'max size of a block =', maxsize
print 'max size occupied =', maxocc
print 'currently occupied =', occ
if __name__ == '__main__':
test()
def getsize(self):
return self._state[1]

@@ -6,18 +6,20 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import sys
import logging
import atexit
import process # from . import process
__all__ = ['enableLogging', 'getLogger', 'subdebug',
'debug', 'info', 'subwarning', 'warning', 'note']
__all__ = ['enableLogging', 'getLogger', 'subdebug', 'debug', 'info', 'note']
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
WARNING = 30
NOTE = 35
_logger = None
def subdebug(msg, *args):

@@ -35,2 +37,14 @@ if _logger:

def subwarning(msg, *args):
if _logger:
_logger.subwarning(msg, *args)
def warning(msg, *args):
if _logger:
_logger.note(msg, *args)
else:
from processing import currentProcess
print >>sys.stderr, ('[WARNING/%s] ' + msg) % \
((currentProcess().getName(),) + args)
def note(msg, *args):

@@ -40,4 +54,5 @@ if _logger:

else:
from processing import currentProcess
print >>sys.stderr, ('[NOTE/%s] ' + msg) % \
((process.currentProcess().getName(),) + args)
((currentProcess().getName(),) + args)

@@ -57,3 +72,4 @@

global _logger
import logging
import logging, atexit
from processing import process

@@ -63,3 +79,3 @@ logging._acquireLock()

if _logger is None:
_logger = logging.getLogger('processing-7bb69610')
_logger = logging.getLogger('processing')
_logger.propagate = 0

@@ -72,9 +88,12 @@

return record
_logger.makeRecord = makeRecord.__get__(_logger, type(_logger))
MethodType = type(_logger.log)
_logger.makeRecord = MethodType(makeRecord, _logger)
_logger.subdebug = MethodType(_logger.log, SUBDEBUG)
_logger.subwarning = MethodType(_logger.log, SUBWARNING)
_logger.note = MethodType(_logger.log, NOTE)
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
logging.addLevelName(NOTE, 'NOTE')
logging.addLevelName(5, 'SUBDEBUG')
logging.addLevelName(31, 'NOTE')
_logger.subdebug = process._MethodType(_logger.log, 5)
_logger.note = process._MethodType(_logger.log, 31)
# cleanup func of `processing` should run before that of `logging`

@@ -81,0 +100,0 @@ atexit._exithandlers.remove((process._exit_func, (), {}))

@@ -7,3 +7,3 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#

@@ -20,2 +20,3 @@

import sys
import socket
import weakref

@@ -28,8 +29,9 @@ import threading

from processing.connection import Listener, Client, AuthenticationError
from processing.connection import Listener, Client, Pipe, AuthenticationError
from processing.connection import deliver_challenge, answer_challenge
from processing.process import Process, currentProcess
from processing.process import activeChildren, _register_afterfork
from processing.logger import subdebug, debug, info
from processing.logger import subdebug, debug, info, warning
from processing.finalize import Finalize, _run_finalizers
from processing.forking import exit

@@ -137,3 +139,3 @@ #

public = ['shutdown', 'create', 'accept_connection',
'getmethods', 'debug_info', 'dummy']
'getmethods', 'debug_info', 'dummy', 'incref', 'decref']

@@ -150,8 +152,6 @@ def __init__(self, registry, address, authkey):

if type(self.address) is tuple:
import socket
self.address = (socket.getfqdn(self.address[0]), self.address[1])
self.id_to_obj = {}
self.id_to_obj = {0: (None, ())}
self.id_to_refcount = {}
self.connection_to_idset = {}
self.mutex = threading.RLock()

@@ -168,3 +168,3 @@ self.stop = 0

c = self.listener.accept()
t = threading.Thread(target=self.handle_request, args=[c])
t = threading.Thread(target=self.handle_request, args=(c,))
t.setDaemon(True)

@@ -188,3 +188,3 @@ t.start()

ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecongized' % funcname
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)

@@ -208,16 +208,11 @@ except (SystemExit, KeyboardInterrupt):

raise
except:
except Exception, e:
if msg[0] == '#ERROR':
print >>sys.stderr, 'Failure to send exception: %s' % msg[1]
warning('Failure to send exception: %r', msg[1])
else:
print >>sys.stderr, 'Failure to send result: %s' % msg[1]
print >>sys.stderr
warning('Failure to send result: %r', msg[1])
warning(' ... request was %r', request)
warning(' ... exception was %r', e)
self.mutex.acquire()
try:
if c in self.connection_to_idset:
del self.connection_to_idset[c]
finally:
self.mutex.release()
c.close()
c.close()

@@ -228,2 +223,5 @@ def serve_client(self, connection):

'''
debug('starting server thread to service %r',
threading.currentThread().getName())
recv = connection.recv

@@ -272,6 +270,11 @@ send = connection.send

except EOFError:
debug('got EOF -- exiting thread serving %r',
threading.currentThread().getName())
sys.exit(0)
except (SystemExit, KeyboardInterrupt):
raise
except Exception:
except:
msg = ('#ERROR', RemoteError())

@@ -292,3 +295,3 @@

token = Token('iter', self.address, res_ident)
result = IteratorProxy(token, auto_connect=False)
result = IteratorProxy(token, incref=False)
msg = ('#RETURN', result)

@@ -304,4 +307,7 @@ except (SystemExit, KeyboardInterrupt):

raise
except:
idset = self.connection_to_idset.pop(connection, None)
except Exception, e:
warning('exception in thread serving %r',
threading.currentThread().getName())
warning(' ... message was %r', msg)
warning(' ... exception was %r', e)
connection.close()

@@ -322,36 +328,5 @@ sys.exit(1)

def fallback_incref(self, connection, ident, obj):
self.mutex.acquire()
try:
try:
self.id_to_refcount[ident] += 1
except TypeError:
assert self.id_to_refcount[ident] is None
self.id_to_refcount[ident] = 1
self.connection_to_idset[connection].add(ident)
finally:
self.mutex.release()
def fallback_decref(self, connection, ident, obj):
self.mutex.acquire()
try:
self.connection_to_idset[connection].remove(ident)
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident]
del self.id_to_refcount[ident]
debug('disposing of obj with id=%d', ident)
if not self.connection_to_idset[connection]:
del self.connection_to_idset[connection]
connection.send(('#RETURN', None))
sys.exit()
finally:
self.mutex.release()
fallback_mapping = {
'__str__':fallback_str, '__repr__':fallback_repr,
'__cmp__':fallback_cmp, '#INCREF':fallback_incref,
'#DECREF':fallback_decref, '#GETVALUE':fallback_getvalue
'__cmp__':fallback_cmp, '#GETVALUE':fallback_getvalue
}

@@ -369,26 +344,13 @@

result = []
result.append('Object reference counts:')
keys = self.id_to_obj.keys()
keys.sort()
for ident in keys:
result.append(' %s: refcount=%s\n %s' %
(hex(ident), self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
result.append('\nConnections and the objects they reference:')
items = self.connection_to_idset.items()
items.sort()
for conn, ids in items:
result.append(' %s:' % conn)
for ident in ids:
result.append(' %s: %s' %
(hex(ident), str(self.id_to_obj[ident][0])[:63]))
if ident != 0:
result.append(' %s: refcount=%s\n %s' %
(hex(ident), self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
finally:
self.mutex.release()
if len(result) > 2:
return '\n'.join(result)
def shutdown(self, c):

@@ -400,2 +362,4 @@ '''

info('manager received shutdown message')
# do some cleaning up
_run_finalizers(0)

@@ -409,5 +373,7 @@ for p in activeChildren():

_run_finalizers()
info('process exiting with `os._exit(0)`')
os._exit(0)
info('manager exiting with exitcode 0')
# now exit without waiting for other threads to finish
exit(0)
def create(self, c, typeid, *args, **kwds):

@@ -427,6 +393,7 @@ '''

debug('have created %r object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed))
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = None
self.connection_to_idset.get(c, set()).add(ident)
return ident, tuple(exposed)

@@ -448,5 +415,26 @@ finally:

c.send(('#RETURN', None))
self.connection_to_idset[c] = set()
self.serve_client(c)
def incref(self, c, ident):
self.mutex.acquire()
try:
try:
self.id_to_refcount[ident] += 1
except TypeError:
assert self.id_to_refcount[ident] is None
self.id_to_refcount[ident] = 1
finally:
self.mutex.release()
def decref(self, c, ident):
self.mutex.acquire()
try:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
debug('disposing of obj with id %d', ident)
finally:
self.mutex.release()
#

@@ -465,3 +453,3 @@ # Definition of BaseManager

connections. If `address` is None then an arbitrary one
is chosen (which will be available as `self.adress`).
is chosen (which will be available as `self.address`).

@@ -490,4 +478,4 @@ `authkey`:

# create a listener so that address of server can be retrieved
l = Listener()
# pipe over which we will retreive address of server
reader, writer = Pipe(duplex=False)

@@ -497,3 +485,3 @@ # spawn process which runs a server

target=self._run_server,
args=(self._registry, self._address, self._authkey, l.address),
args=(self._registry, self._address, self._authkey, writer),
)

@@ -503,13 +491,8 @@ ident = ':'.join(map(str, self._process._identity))

self._process.setAuthKey(self._authkey)
## try:
## self._process.setDaemon(True)
## except AssertionError:
## pass
self._process.start()
# get address of server
conn = l.accept()
self._address = conn.recv()
conn.close()
l.close()
writer.close()
self._address = reader.recv()
reader.close()

@@ -519,7 +502,8 @@ # register a finalizer

self, BaseManager._finalize_manager,
args=[self._process, self._address, self._authkey], exitpriority=0
args=(self._process, self._address, self._authkey),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, parent_address):
def _run_server(cls, registry, address, authkey, writer):
'''

@@ -533,8 +517,7 @@ Create a server, report its address and run it

# inform parent process of the server's address
connection = Client(parent_address)
connection.send(server.address)
connection.close()
writer.send(server.address)
writer.close()
# run the manager
info('manager bound to %r', server.address)
info('manager serving at %r', server.address)
server.serve_forever()

@@ -548,2 +531,3 @@

self._started = True
registry, _ = BaseManager._get_registry_creators(self)

@@ -648,3 +632,8 @@ server = Server(registry, self._address, self._authkey)

info('manager still alive after terminate')
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)

@@ -672,4 +661,5 @@

token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(token, authkey=self._authkey, exposed=exp)
proxy._manager = self
proxy = proxytype(
token, manager=self, authkey=self._authkey, exposed=exp
)
return proxy

@@ -697,2 +687,21 @@

#
# Subclasses of threading.local and set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
_register_afterfork(self, set.clear)
def __reduce__(self):
return type(self), ()
class ThreadLocalStorage(threading.local):
def __init__(self):
_register_afterfork(self, clear_namespace)
def __reduce__(self):
return type(self), ()
def clear_namespace(obj):
obj.__dict__.clear()
#
# Definition of BaseProxy

@@ -705,64 +714,46 @@ #

'''
__key_to_socket = {}
__connection_to_refcount = {}
__mutex = threading.RLock()
_address_to_local = {}
_mutex = threading.Lock()
def __init__(self, token, authkey=None, exposed=None, auto_connect=True):
def __init__(self, token, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = ThreadLocalStorage(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._key = (os.getpid(), threading._get_ident(), self._token.address)
self._id = self._token.id
# if we don't connect now then we must worry about refcounting
if auto_connect:
self._connect(authkey)
self._manager = manager
if incref:
self._incref()
_register_afterfork(self, BaseProxy._afterfork)
def _connect(self, authkey, name=None):
assert not hasattr(self, '_connection')
def _connect(self, authkey=None, name=None):
if authkey is None:
authkey = currentProcess().getAuthKey()
BaseProxy.__mutex.acquire()
try:
if self._key in BaseProxy.__key_to_socket:
self._connection = BaseProxy.__key_to_socket[self._key]
try:
debug('incref %s', [self._token.typeid, self._token.id])
self._callmethod('#INCREF')
except RemoteError:
del self._connection
raise KeyError, 'token not found: %s' % self._token
else:
BaseProxy.__connection_to_refcount[self._connection] += 1
else:
if name is None:
name = currentProcess().getName()
if threading.currentThread().getName() != 'MainThread':
name += '|' + threading.currentThread().getName()
debug('making connection to manager')
connection = Client(self._token.address, authkey=authkey)
dispatch(connection, None, 'accept_connection', (name,))
try:
debug('incref %s', [self._token.typeid, self._token.id])
dispatch(connection, self._id, '#INCREF')
except RemoteError:
connection.close()
raise KeyError, 'token not found: %s' % self._token
else:
BaseProxy.__connection_to_refcount[connection] = 1
BaseProxy.__key_to_socket[self._key] = connection
self._connection = connection
self._close = Finalize(
self, BaseProxy._finalize_proxy,
args=[self._connection, self._key, self._token],
exitpriority=0
)
finally:
BaseProxy.__mutex.release()
if name is None:
name = currentProcess().getName()
if threading.currentThread().getName() != 'MainThread':
name += '|' + threading.currentThread().getName()
debug('making connection to manager')
connection = Client(self._token.address, authkey=authkey)
dispatch(connection, None, 'accept_connection', (name,))
self._tls.connection = connection
def _callmethod(self, methodname, args=(), kwds={}):

@@ -772,4 +763,19 @@ '''

'''
return dispatch(self._connection, self._id, methodname, args, kwds)
try:
conn = self._tls.connection
except AttributeError:
debug('thread %r does not own a connection',
threading.currentThread().getName())
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#ERROR':
raise result
else:
raise ValueError
def _getvalue(self):

@@ -781,37 +787,51 @@ '''

def _incref(self):
authkey = currentProcess().getAuthKey()
connection = Client(self._token.address, authkey=authkey)
dispatch(connection, None, 'incref', (self._id,))
debug('INCREF %r', self._token.id)
assert self._id not in self._idset
self._idset.add(self._id)
shutdown = getattr(self._manager, 'shutdown', None)
self._close = Finalize(
self, BaseProxy._decref,
args=(self._token, authkey, shutdown, self._tls, self._idset),
exitpriority=10
)
@staticmethod
def _finalize_proxy(connection, key, token):
BaseProxy.__mutex.acquire()
try:
assert connection is BaseProxy.__key_to_socket[key]
assert BaseProxy.__connection_to_refcount[connection] >= 0
def _decref(token, authkey, shutdown, tls, idset):
idset.remove(token.id)
BaseProxy.__connection_to_refcount[connection] -= 1
# check whether manager is still alive
manager_still_alive = shutdown is None or shutdown.still_active()
if manager_still_alive:
# tell manager this process no longer cares about referent
try:
debug('decref %s', [token.typeid, token.id])
dispatch(connection, token.id, '#DECREF', (), {})
debug('DECREF %r', token.id)
connection = Client(token.address, authkey=authkey)
dispatch(connection, None, 'decref', (token.id,))
except (SystemExit, KeyboardInterrupt):
raise
except Exception, e:
debug('... decref FAILED %s', e)
debug('... decref failed %s', e)
if BaseProxy.__connection_to_refcount[connection] == 0:
debug('closing connection to manager because no more proxies')
connection.close()
del BaseProxy.__key_to_socket[key]
del BaseProxy.__connection_to_refcount[connection]
else:
debug('DECREF %r -- manager already shutdown',
token.id)
finally:
BaseProxy.__mutex.release()
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
debug('thread %r has no more proxies so closing conn',
threading.currentThread().getName())
tls.connection.close()
del tls.connection
def _afterfork(self):
authkey = currentProcess().getAuthKey()
process_name = currentProcess().getName()
if hasattr(self, '_connection'):
del self._connection
self._key = (os.getpid(), threading._get_ident(), self._token.address)
try:
self._connect(authkey=authkey, name=process_name)
except KeyError, e:
debug('ignoring KeyError: %r', e)
self._manager = None
self._incref()

@@ -821,19 +841,12 @@ def __reduce__(self):

return (RebuildProxy, (MakeAutoProxy, self._token,
{'exposed':self._exposed}))
{'exposed': self._exposed}))
else:
return (RebuildProxy, (type(self), self._token))
return (RebuildProxy, (type(self), self._token, {}))
def __copy__(self):
if self._key[:2] == (os.getpid(), threading._get_ident()):
return self
else:
rebuild, args = self.__reduce__()
return rebuild(*args)
def __deepcopy__(self, memo):
return self._getvalue()
def __hash__(self):
raise NotImplementedError
raise NotImplementedError, 'proxies are unhashable'
def __repr__(self):

@@ -855,2 +868,10 @@ return '<Proxy[%s] object at %s>' % (self._token.typeid,

#
# Since BaseProxy._mutex might be locked at time of fork we reset it
#
def reset_mutex(obj):
obj._mutex = threading.Lock()
_register_afterfork(BaseProxy, reset_mutex)
#
# Function used for unpickling

@@ -866,9 +887,13 @@ #

server = getattr(currentProcess(), '_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
auto_connect = not getattr(currentProcess(), '_unpickling', False)
incref = (
kwds.pop('incref', True) and
not getattr(currentProcess(), '_inheriting', False)
)
try:
return func(token, authkey=None, auto_connect=auto_connect, **kwds)
return func(token, manager=None, authkey=None,
incref=incref, **kwds)
except AuthenticationError:

@@ -903,3 +928,4 @@ raise AuthenticationError, 'cannot rebuild proxy without authkey'

def MakeAutoProxy(token, authkey=None, exposed=None, auto_connect=True):
def MakeAutoProxy(token, manager=None, authkey=None,
exposed=None, incref=True):
'''

@@ -911,4 +937,3 @@ Return an auto-proxy for `token`

ProxyType = MakeAutoProxyType(exposed, token.typeid)
proxy = ProxyType(token, authkey=authkey, auto_connect=auto_connect)
proxy._exposed = exposed
proxy = ProxyType(token, manager=manager, authkey=authkey, incref=incref)
return proxy

@@ -942,8 +967,8 @@

class SharedValue(object):
class Value(object):
'''
Instances have a settable 'value' property
'''
def __init__(self, format, value):
self._format = format
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value

@@ -958,9 +983,6 @@

def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._format, self._value)
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
class SharedStruct(SharedValue):
pass
#

@@ -1043,5 +1065,5 @@ # Proxy type used by BaseManager

ListProxy = MakeAutoProxyType(_list_exposed, 'BaseListProxy')
BaseListProxy = MakeAutoProxyType(_list_exposed, 'BaseListProxy')
class ListProxy(ListProxy):
class ListProxy(BaseListProxy):
# augmented assignment functions must return self

@@ -1065,3 +1087,3 @@ def __iadd__(self, value):

class SharedValueProxy(BaseProxy):
class ValueProxy(BaseProxy):
def get(self):

@@ -1073,2 +1095,4 @@ return self._callmethod('get')

def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)

@@ -1103,4 +1127,3 @@ _arr_exposed = (

dict = CreatorMethod(dict, exposed=_dict_exposed)
SharedValue = CreatorMethod(SharedValue, SharedValueProxy)
SharedStruct = CreatorMethod(SharedStruct, SharedValueProxy)
SharedArray = CreatorMethod(array.array, exposed=_arr_exposed)
Value = CreatorMethod(Value, ValueProxy)
Array = CreatorMethod(Array, exposed=_arr_exposed)

@@ -6,3 +6,3 @@ #

#
# Copyright (c) 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#

@@ -24,3 +24,2 @@

from processing import Process
from processing.process import _sleep_until_neq
from processing.logger import debug

@@ -47,5 +46,2 @@ from processing.finalize import Finalize

def applystar(args):
return apply(*args)
#

@@ -55,5 +51,8 @@ # Code run by worker processes

def worker(inqueue, outqueue):
def worker(inqueue, outqueue, initializer=None, initargs=()):
put = outqueue.put
if initializer is not None:
initializer(*initargs)
for job, i, func, args, kwds in iter(inqueue.get, None):

@@ -67,3 +66,3 @@ try:

debug('worker got sentinel -- exiting')
#

@@ -77,3 +76,3 @@ # Class representing a process pool

'''
def __init__(self, processes=None):
def __init__(self, processes=None, initializer=None, initargs=()):
self._inqueue = SimpleQueue()

@@ -84,3 +83,3 @@ self._outqueue = SimpleQueue()

self._state = RUN
if processes is None:

@@ -93,3 +92,4 @@ try:

self._pool = [
Process(target=worker, args=[self._inqueue, self._outqueue])
Process(target=worker, args=(self._inqueue, self._outqueue,
initializer, initargs))
for i in range(processes)

@@ -101,7 +101,6 @@ ]

w.start()
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=[self._taskqueue, self._inqueue, self._outqueue,
self._pool]
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool)
)

@@ -114,3 +113,3 @@ self._task_handler.setDaemon(True)

target=Pool._handle_results,
args=[self._outqueue, self._cache]
args=(self._outqueue, self._cache)
)

@@ -123,5 +122,5 @@ self._result_handler.setDaemon(True)

self, Pool._terminate_pool,
args=[self._taskqueue, self._inqueue, self._outqueue,
args=(self._taskqueue, self._inqueue, self._outqueue,
self._cache, self._pool, self._task_handler,
self._result_handler],
self._result_handler),
exitpriority=5

@@ -260,2 +259,3 @@ )

debug('result handler ignoring extra sentinel')
continue
job, i, obj = item

@@ -280,4 +280,5 @@ try:

def __reduce__(self):
raise NotImplementedError
raise NotImplementedError, \
'pool objects cannot be passed between processes or pickled'
def close(self):

@@ -293,4 +294,2 @@ debug('closing pool')

shutdown = terminate # depracated alias
def join(self):

@@ -341,2 +340,7 @@ debug('joining pool')

p.join()
debug('closing connections')
inqueue._reader.close()
outqueue._reader.close()
inqueue._writer.close()
outqueue._writer.close()

@@ -343,0 +347,0 @@ #

@@ -6,8 +6,7 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = [
'Process', 'currentProcess', 'activeChildren', 'freezeSupport',
'ProcessExit'
'Process', 'currentProcess', 'activeChildren'
]

@@ -21,23 +20,10 @@

import sys
import time
import signal
import subprocess
import time
import atexit
import weakref
import copy_reg
import itertools
import copy_reg
import cPickle
import _processing # from . import _processing
from finalize import Finalize, _run_finalizers # from .finalize import ...
from logger import info # form .logger import info
#
# `ProcessExit` exception
#
class ProcessExit(SystemExit):
pass
#
# Public functions

@@ -59,10 +45,9 @@ #

def freezeSupport():
def _cleanup():
'''
Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
Purge `_children` of dead processes
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from processing import _nonforking
_nonforking.freezeSupport()
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)

@@ -80,11 +65,4 @@ #

def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
_current_process._counter += 1
counter = _current_process._counter
counter = _current_process._counter.next()
self._target = target
self._args = tuple(args)
self._kwargs = kwargs.copy()
self._stoppable = False
self._parent_pid = os.getpid()
self._popen = None
self._identity = _current_process._identity + (counter,)

@@ -94,5 +72,9 @@ self._authkey = _current_process._authkey

self._logargs = _current_process._logargs
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = kwargs.copy()
self._name = name or 'Process-' + ':'.join(map(str, self._identity))
self._counter = 0
self._children = set()

@@ -110,2 +92,3 @@ def run(self):

'''
from processing.forking import Popen
assert self._popen is None, 'cannot start a process twice'

@@ -115,14 +98,5 @@ assert self._parent_pid == os.getpid(), \

_cleanup()
sys.stdout.flush()
sys.stderr.flush()
self._popen = Popen(self, self._stoppable)
self._popen = Popen(self)
_current_process._children.add(self)
def stop(self):
'''
Raise `KeyboardInterrupt` in the process to stop it
'''
assert self._stoppable, '`setStoppable(True)` was not used'
self._popen.stop()
def terminate(self):

@@ -143,6 +117,5 @@ '''

elif timeout is None:
# avoid using `Popen.wait()` because that is uninterruptible
res = _sleep_until_neq(self._popen.poll, None, 1e100)
res = self._popen.wait()
else:
res = _sleep_until_neq(self._popen.poll, None, timeout)
res = self._popen.wait_timeout(timeout)
if res is not None:

@@ -187,3 +160,2 @@ _cleanup()

assert self._popen is None, 'process has already started'
assert hasattr(self, 'stop'), 'process needs a `stop()` method'
self._daemonic = daemonic

@@ -202,3 +174,2 @@

assert type(authkey) is str, 'value must be a string'
self._authkey = authkey

@@ -212,4 +183,2 @@

return self._popen
if self._popen.poll() == 0x10000:
return -signal.SIGTERM
return self._popen.poll()

@@ -221,17 +190,8 @@

'''
return self._popen and self._popen.pid
if self is _current_process:
return os.getpid()
else:
assert self._parent_pid == os.getpid()
return self._popen and self._popen.pid
def getStoppable(self):
'''
Returns whether process supports the `stop()` method and `ProcessExit`
'''
return self._stoppable
def setStoppable(self, value):
'''
Set whether process supports the `stop()` method and `ProcessExit`
'''
assert self._popen is None, 'process has already started'
self._stoppable = value
def __repr__(self):

@@ -262,6 +222,11 @@ if self is _current_process:

def _bootstrap(self):
from processing.finalize import _registry
from processing.logger import info
global _current_process
try:
self._children = set()
self._counter = itertools.count(1)
sys.stdin.close()
Finalize._registry.clear()
_registry.clear()
if sys.platform == 'win32' and self._logargs is not None:

@@ -272,12 +237,8 @@ from processing.logger import enableLogging

_afterfork()
info('child process calling self.run()')
try:
info('child process calling self.run()')
self.run()
exitcode = 0
finally:
sys.stdout.flush()
sys.stderr.flush()
_exit_func()
except ProcessExit:
exitcode = 127
except SystemExit, e:

@@ -297,4 +258,4 @@ if not e.args:

info('process exiting with `os.exit(...)`')
os._exit(exitcode)
info('process exiting with exitcode %d' % exitcode)
return exitcode

@@ -314,12 +275,6 @@ #

self._logargs = None
self._counter = 0
self._counter = itertools.count(1)
self._children = set()
self._authkey = ''.join('%02x' % ord(c) for c in os.urandom(16))
# calculate authentication key
try:
self._authkey = os.urandom(16).encode('hex')
except AttributeError:
import random
key = [chr(random.randrange(256)) for i in range(16)]
self._authkey = ''.join(key).encode('hex')

@@ -330,127 +285,6 @@ _current_process = _MainProcess()

#
# Private functions
#
def _cleanup():
'''
Purge `_children` of dead processes
'''
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
def _sleep_until_neq(func, value, timeout):
'''
Sleep until `func() != value` or timeout elapses
Returns the last value returned by `func()`
'''
deadline = time.time() + timeout
delay = 0.0005
while 1:
res = func()
if res != value:
break
remaining = deadline - time.time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, 0.05)
time.sleep(delay)
return res
#
# We define a subclass of `subprocess.Popen`.
# The constructor takes a process object as its argument.
#
if sys.platform != 'win32':
class Popen(subprocess.Popen):
def __init__(self, process_obj, ignore=None):
import random
subprocess._cleanup()
self.returncode = None
self.pid = os.fork()
if self.pid == 0:
random.seed()
process_obj._bootstrap()
os._exit(0)
if not hasattr(subprocess.Popen, '__del__'): # Python 2.4
subprocess._active.append(self)
else: # Python 2.5
self._child_created = True
def stop(self):
if self.returncode is None:
try:
os.kill(self.pid, STOP_PROCESS_SIGNAL)
except OSError:
if self.returncode is not None:
raise
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError:
if self.returncode is not None:
raise
else:
CREATE_NEW_PROCESS_GROUP = 0x00000200
class Popen(subprocess.Popen):
def __init__(self, process_obj, new_console=1):
from processing import _nonforking
subprocess.Popen.__init__(
self, _nonforking.get_commandline(),
stdin=subprocess.PIPE,
creationflags=new_console and CREATE_NEW_PROCESS_GROUP
)
prep_data = _nonforking.get_preparation_data(
process_obj._name, new_console
)
self.stdin.write(cPickle.dumps(prep_data, 2).encode('hex') + '\n')
self.stdin.write(cPickle.dumps(process_obj, 2).encode('hex'))
self.stdin.close()
def stop(self):
if self.returncode is None:
try:
_processing.GenerateConsoleCtrlEvent(1, self.pid)
except WindowsError:
if self.returncode is not None:
raise
def terminate(self):
if self.returncode is None:
try:
_processing.TerminateProcess(int(self._handle), 0x10000)
except WindowsError:
if self.returncode is not None:
raise
#
# In Python 2.5 `subprocess.Popen` has a `__del__()` method which often
# seems to raise `AttributeError` when the interpreter shuts down. We
# overwrite it.
#
if hasattr(Popen, '__del__'):
def __del__(self):
if Popen is not None:
try:
super(Popen, self).__del__()
except AttributeError:
pass
Popen.__del__ = __del__
del __del__
#
# Give names to some return codes
#
_exitcode_to_name = { 127 : 'ProcessExit' }
_exitcode_to_name = {}

@@ -462,22 +296,5 @@ for name, signum in signal.__dict__.items():

#
# Signal handling
#
if sys.platform == 'win32':
STOP_PROCESS_SIGNAL = signal.SIGBREAK
else:
STOP_PROCESS_SIGNAL = signal.SIGUSR1
def _STOP_PROCESS_handler(signum, frame):
raise ProcessExit
# On Windows signal handler is set in `_nonforking`
signal.signal(STOP_PROCESS_SIGNAL, _STOP_PROCESS_handler)
#
# Make bound and unbound instance methods and class methods picklable
#
_MethodType = type(Process.start)
def _reduce_method(m):

@@ -489,3 +306,3 @@ if m.im_self is None:

copy_reg.pickle(_MethodType, _reduce_method)
copy_reg.pickle(type(_current_process.start), _reduce_method)

@@ -497,9 +314,11 @@ #

_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_count = itertools.count()
def _afterfork():
for (ident, func), obj in _afterfork_registry.items():
# execute in order of registration
for (index, ident, func), obj in sorted(_afterfork_registry.items()):
func(obj)
def _register_afterfork(obj, func):
_afterfork_registry[(id(obj), func)] = obj
_afterfork_registry[(_afterfork_count.next(), id(obj), func)] = obj

@@ -511,2 +330,7 @@ #

def _exit_func():
from processing.finalize import _run_finalizers
from processing.logger import info
_current_process._exiting = True
info('running all "atexit" finalizers with priority >= 0')

@@ -516,14 +340,2 @@ _run_finalizers(0)

for p in activeChildren():
if p._daemonic and p._stoppable:
info('calling `stop()` for daemon %s', p.getName())
p._popen.stop()
deadline = time.time() + 0.1
for p in activeChildren():
if p._daemonic and p._stoppable:
info('calling `join(timeout)` for daemon %s', p.getName())
p.join(deadline - time.time())
for p in activeChildren():
if p._daemonic:

@@ -537,5 +349,5 @@ info('calling `terminate()` for daemon %s', p.getName())

info('running all "atexit" finalizers with priority < 0')
info('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_func)

@@ -6,3 +6,3 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#

@@ -22,10 +22,11 @@

from processing import _processing, Pipe
from processing import _processing, Pipe, currentProcess
from processing.synchronize import Lock, BoundedSemaphore
from processing.logger import debug
from processing.logger import debug, subwarning
from processing.finalize import Finalize
from processing.process import _exit_func, _register_afterfork
from processing.forking import PicklableOnlyForInheritance
#
# Ensure cleanup func of `processing` runs before that of `threading`
# Cleanup function of `processing` should run before that of `threading`
#

@@ -37,6 +38,6 @@

#
# Queue type based on a pipe -- uses a buffer and a thread
# Queue type using a pipe, buffer and thread
#
class Queue(object):
class Queue(PicklableOnlyForInheritance):

@@ -46,3 +47,8 @@ def __init__(self, maxsize=0):

rlock = Lock()
wlock = Lock()
if sys.platform == 'win32':
wlock = None
else:
wlock = Lock()
if maxsize < 0:
maxsize = 0
if maxsize == 0:

@@ -54,3 +60,3 @@ sem = None

state = maxsize, reader, writer, rlock, wlock, sem, os.getpid()
self.__setstate__(state)
self._setstate(state)

@@ -60,5 +66,5 @@ if sys.platform != 'win32':

def __setstate__(self, state):
def _setstate(self, state):
(self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._rlock, self._wlock, self._sem, self._opid) = self._state = state
self._send = self._writer.send

@@ -69,8 +75,2 @@ self._recv = self._reader.recv

def __getstate__(self):
assert sys.platform == 'win32'
assert not self._closed
return (self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def _afterfork(self):

@@ -88,13 +88,5 @@ debug('Queue._afterfork()')

assert not self._closed
if self._maxsize:
if block and timeout is None:
if self._sem:
self._sem.acquire()
else:
if not block:
timeout = 0.0
else:
timeout = max(0.0, timeout)
if not self._sem._block.acquire_timeout(timeout):
raise Full
if self._sem is not None:
if not self._sem.acquire(block, timeout):
raise Full

@@ -123,3 +115,3 @@ self._notempty.acquire()

def get(self, block=1, timeout=None):
def get(self, block=True, timeout=None):
if block and timeout is None:

@@ -134,15 +126,10 @@ self._rlock.acquire()

self._rlock.release()
else:
if not block:
timeout = 0.0
else:
timeout = max(0.0, timeout)
deadline = time.time() + timeout
if not self._rlock._block.acquire_timeout(timeout):
if block:
deadline = time.time() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
timeout = max(0.0, deadline - time.time())
if not self._poll(timeout):
if not self._poll(block and (deadline-time.time()) or 0.0):
raise Empty

@@ -157,6 +144,9 @@ res = self._recv()

def empty(self):
# Even more unreliable than Queue.Queue.empty(): True can be
# returned when enqueued items are buffered but none are
# yet in the pipe
return not self._poll()
def full(self):
return bool(self._sem) and self._sem._block._getvalue() == 0
return bool(self._sem) and self._sem._semlock._getvalue() == 0

@@ -171,2 +161,3 @@ def get_nowait(self):

self._closed = True
self._reader.close()
if self._close:

@@ -196,3 +187,4 @@ self._close()

target=Queue._feed,
args=[self._buffer, self._notempty, self._send, self._wlock],
args=(self._buffer, self._notempty, self._send,
self._wlock, self._writer.close),
name='QueueFeederThread'

@@ -202,3 +194,3 @@ )

debug('doing self._thread.start() %s' % self._thread)
debug('doing self._thread.start()')
self._thread.start()

@@ -240,3 +232,3 @@ debug('... done self._thread.start()')

def _finalize_close(buffer, notempty):
debug('telling thread used by a buffered queue to quit')
debug('telling queue thread to quit')
notempty.acquire()

@@ -250,24 +242,40 @@ try:

@staticmethod
def _feed(buffer, notempty, send, writelock):
def _feed(buffer, notempty, send, writelock, close):
debug('starting thread to feed data to pipe')
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
try:
while 1:
notempty.acquire()
nacquire()
try:
if not buffer:
notempty.wait()
nwait()
finally:
notempty.release()
nrelease()
try:
while 1:
obj = buffer.popleft()
if obj is _sentinel:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
writelock.acquire()
try:
if wacquire is None:
send(obj)
finally:
writelock.release()
else:
wacquire()
try:
send(obj)
finally:
wrelease()
except IndexError:

@@ -280,4 +288,5 @@ pass

# started to cleanup.
debug('error in queue thread: %s', e)
if not Finalize._exiting:
if getattr(currentProcess(), '_exiting', False):
subwarning('error in queue thread: %s', e)
else:
raise

@@ -291,3 +300,3 @@

class SimpleQueue(object):
class SimpleQueue(PicklableOnlyForInheritance):

@@ -300,3 +309,3 @@ def __init__(self):

state = reader, writer, Lock(), Lock()
self.__setstate__(state)
self._setstate(state)

@@ -306,4 +315,5 @@ def empty(self):

def __setstate__(self, state):
self._reader, self._writer, self._rlock, self._wlock = state
def _setstate(self, state):
(self._reader, self._writer, self._rlock, self._wlock) \
= self._state = state

@@ -333,6 +343,1 @@ recv = self._reader.recv

self.put = put
def __getstate__(self):
assert sys.platform == 'win32'
return self._reader, self._writer, self._rlock, self._wlock

@@ -8,3 +8,3 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#

@@ -18,4 +18,5 @@

import socket
import threading
from processing import _processing
from processing import _processing, currentProcess
from processing.logger import debug, subdebug

@@ -27,5 +28,5 @@

if (not hasattr(_processing, 'DuplicateHandle') and
not hasattr(_processing, 'recvfd')):
raise ImportError, 'Cannot transfer handles/descriptors between processes'
connections_are_picklable = (
sys.platform == 'win32' or hasattr(_processing, 'recvfd')
)

@@ -39,7 +40,2 @@ try:

return s
if sys.platform == 'win32':
closefd = _processing.CloseHandle
else:
closefd = os.close

@@ -52,4 +48,5 @@ #

import msvcrt
win32 = _processing
import msvcrt, _subprocess
from processing._processing import win32
closefd = _processing.win32.CloseHandle

@@ -62,37 +59,26 @@ #

subdebug('reducing handle %d', handle)
return (os.getpid(), handle)
flags = win32.GetHandleInformation(handle)
return (os.getpid(), handle, flags & win32.HANDLE_FLAG_INHERIT)
def rebuild_handle(reduced_handle):
pid, old_handle = reduced_handle
pid, old_handle, inheritable = reduced_handle
subdebug('rebuilding handle %d from PID=%d', old_handle, pid)
if inheritable and getattr(currentProcess(), '_inheriting', False):
return old_handle
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, pid
)
try:
new_handle = win32.DuplicateHandle(
process_handle, old_handle, win32.GetCurrentProcess(),
0, True, win32.DUPLICATE_SAME_ACCESS
new_handle = _subprocess.DuplicateHandle(
process_handle, old_handle, _subprocess.GetCurrentProcess(),
0, False, _subprocess.DUPLICATE_SAME_ACCESS
)
finally:
win32.CloseHandle(process_handle)
return new_handle
return new_handle.Detach()
#
# Register `file` with `copy_reg`
#
def reduce_file(f):
reduced_handle = reduce_handle(msvcrt.get_osfhandle(f.fileno()))
return rebuild_file, (reduced_handle, f.mode)
def rebuild_file(reduced_handle, mode):
fd = msvcrt.open_osfhandle(rebuild_handle(reduced_handle), 0)
return os.fdopen(fd, mode)
copy_reg.pickle(file, reduce_file)
#
# Register `_processing.PipeConnection` with `copy_reg`

@@ -103,9 +89,7 @@ #

return rebuild_pipe_connection, (reduce_handle(conn.fileno()),)
def rebuild_pipe_connection(reduced_handle):
handle = rebuild_handle(reduced_handle)
conn = _processing.PipeConnection(handle)
_processing.CloseHandle(handle)
return conn
return _processing.PipeConnection(handle, duplicate=False)
copy_reg.pickle(_processing.PipeConnection, reduce_pipe_connection)

@@ -124,5 +108,4 @@

#
closefd = os.close
import threading
_fd_cache = set()

@@ -149,2 +132,5 @@ _fd_lock = threading.Lock()

global _fd_listener
if not connections_are_picklable:
raise RuntimeError, 'pickling of file dscriptors not supported'

@@ -177,17 +163,2 @@ if _fd_listener is None:

#
# Register `file` with `copy_reg`
#
def reduce_file(f):
reduced_handle = reduce_handle(f.fileno())
return rebuild_file, (reduced_handle, f.mode)
def rebuild_file(reduced_handle, mode):
fd = rebuild_handle(reduced_handle)
return os.fdopen(fd, mode)
copy_reg.pickle(file, reduce_file)
#

@@ -197,13 +168,10 @@ # Register `_processing.Connection` with `copy_reg`

def reduce_socket_connection(conn):
reduced_handle = reduce_handle(conn.fileno())
return rebuild_socket_connection, (reduced_handle,)
def reduce_connection(conn):
return rebuild_connection, (reduce_handle(conn.fileno()),)
def rebuild_socket_connection(reduced_handle):
def rebuild_connection(reduced_handle):
fd = rebuild_handle(reduced_handle)
conn = _processing.Connection(fd)
closefd(fd)
return conn
return _processing.Connection(fd, duplicate=False)
copy_reg.pickle(_processing.Connection, reduce_socket_connection)
copy_reg.pickle(_processing.Connection, reduce_connection)

@@ -220,6 +188,3 @@ #

address = s.getsockname()
if type(address) is str:
Family = socket.AF_UNIX
else:
Family = socket.AF_INET
Family = type(address) is str and socket.AF_UNIX or socket.AF_INET
Type = s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)

@@ -226,0 +191,0 @@ Proto = 0

@@ -6,12 +6,14 @@ #

#
# Copyright (c) 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
import sys
import ctypes
import weakref
import ctypes
import sys
import copy_reg
from processing import heap, Lock
from processing import heap, RLock
from processing.forking import PicklableOnlyForInheritance, assert_spawning
__all__ = ['new_value', 'new_array', 'copy']
__all__ = ['Value', 'Array', 'copy', 'synchronized']

@@ -21,3 +23,16 @@ #

#
typecode_to_type = {
'c': ctypes.c_char, 'u': ctypes.c_wchar,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}
#
#
#
def _new_value(type_):

@@ -28,4 +43,4 @@ size = ctypes.sizeof(type_)

def new_value(fmt_or_type, *args):
type_ = gettype(fmt_or_type)
def Value(typecode_or_type, *args):
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)

@@ -36,9 +51,7 @@ ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))

def new_array(fmt_or_type, size_or_initializer):
type_ = gettype(fmt_or_type)
if isinstance(type_, str):
type_ = _fmt_to_type[type_]
def Array(typecode_or_type, size_or_initializer):
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, int):
type_ = type_ * size_or_initializer
return new_value(type_)
return _new_value(type_)
else:

@@ -55,2 +68,20 @@ type_ = type_ * len(size_or_initializer)

def synchronized(obj, lock=None):
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
return SynchronizedString(obj, lock)
return SynchronizedArray(obj, lock)
else:
cls = type(obj)
try:
scls = classcache[cls]
except KeyError:
names = [field[0] for field in cls._fields_]
d = dict((name, makeproperty(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = classcache[cls] = type(classname, (SynchronizedBase,), d)
return scls(obj, lock)
#

@@ -61,3 +92,4 @@ # Functions for pickling/unpickling

def reduce_ctype(obj):
assert sys.platform == 'win32'
assert sys.platform == 'win32', \
'synchronized objects should only be shared through inheritance'
if isinstance(obj, ctypes.Array):

@@ -71,3 +103,4 @@ return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)

type_ = type_ * length
fixup(type_)
if sys.platform == 'win32' and type_ not in copy_reg.dispatch_table:
copy_reg.pickle(type_, reduce_ctype)
obj = type_.from_address(wrapper.getaddress())

@@ -77,63 +110,100 @@ obj._wrapper = wrapper

def fixup(type_):
if (sys.platform == 'win32' and type_.__reduce__ is not reduce_ctype):
type_.__reduce__ = reduce_ctype
#
# Function which converts format strings to ctype types
# Function to create properties
#
def gettype(fmt_or_type, mapping={}):
if not mapping:
for name in dir(ctypes):
if name[:2] == 'c_':
T = getattr(ctypes, name)
if hasattr(T, '_type_'):
mapping[T._type_] = T
mapping.update(i=ctypes.c_int, I=ctypes.c_uint,
l=ctypes.c_long, L=ctypes.c_ulong)
return mapping.get(fmt_or_type, fmt_or_type)
def makeproperty(name):
try:
return propcache[name]
except KeyError:
d = {}
exec template % ((name,)*7) in d
propcache[name] = d[name]
return d[name]
template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''
propcache = {}
classcache = weakref.WeakKeyDictionary()
#
# Tests
# Synchronized wrappers
#
class _Foo(ctypes.Structure):
_fields_ = [
('x', ctypes.c_int),
('y', ctypes.c_double)
]
class SynchronizedBase(PicklableOnlyForInheritance):
def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def _test(x, y, foo, arr, string):
x.value **= 2
y.value **= 2
foo.x **= 2
foo.y **= 2
for i in range(len(arr)):
arr[i] **= 2
string.value = string.value.upper()
def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
def test():
from processing import Process
def getobj(self):
return self._obj
x = new_value('i', 7)
y = new_value(ctypes.c_double, 1.0/3.0)
foo = new_value(_Foo, 3, 2)
arr = new_array('d', range(10))
string = new_array('c', 'hello world')
bar = copy(foo)
assert (foo.x, foo.y) == (bar.x, bar.y)
p = Process(target=_test, args=(x, y, foo, arr, string))
p.start()
p.join()
print x.value
print y.value
print (foo.x, foo.y)
print arr[:]
print string.value
if __name__ == '__main__':
test()
def getlock(self):
return self._lock
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
class Synchronized(SynchronizedBase):
value = makeproperty('value')
class SynchronizedArray(SynchronizedBase):
def __len__(self):
return len(self._obj)
def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()
def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()
def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()
def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()
class SynchronizedString(SynchronizedArray):
value = makeproperty('value')
raw = makeproperty('raw')

@@ -6,3 +6,3 @@ #

#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#

@@ -16,3 +16,2 @@

import sys
import itertools

@@ -23,79 +22,40 @@ from struct import pack as _pack, unpack as _unpack, calcsize as _calcsize

from processing import _processing
from processing.process import currentProcess
from processing.process import currentProcess, _register_afterfork
from processing.logger import debug, subdebug
from processing.finalize import Finalize
from processing.forking import PicklableOnlyForInheritance
#
# Constants to describe the kind of blocker
# Constants to describe the kind of blocker (normal lock is a bounded sem)
#
MUTEX = 0
RECURSIVE_MUTEX = 1
SEMAPHORE = 2
BOUNDED_SEMAPHORE = 3
RECURSIVE_MUTEX, SEMAPHORE, BOUNDED_SEMAPHORE = range(3)
#
# Globals used for generating names
# Base class for semaphores and mutexes; wraps `_processing.SemLock`
#
_nextid = itertools.count().next
class SemLock(PicklableOnlyForInheritance):
#
# Base class for semaphores and mutexes; wraps `_processing.Blocker`
#
class Blocker(object):
def __init__(self, kind, value):
counter = _nextid()
name = '/pys-%s-%s' % (os.getpid(), counter)
self._block = _processing.Blocker(
name=name, create=True, kind=kind, value=value
)
debug('creating blocker with name %r' % name)
sl = self._semlock = _processing.SemLock(kind, value)
debug('created semlock with handle %s' % sl.handle)
self._setstate((sl.handle, sl.kind, sl.maxvalue))
if sys.platform != 'win32':
# On Unix we immediately unlink the name of the
# semaphore since otherwise the semaphore might not
# get removed (till the next reboot) if python gets
# killed. This means that `Blocker` objects are not
# picklable on Unix, but that does not prevent a child
# process from using a `Blocker` object inherited from
# its parent.
self._block._unlink()
def _afterfork(obj):
obj._semlock._afterfork()
_register_afterfork(self, _afterfork)
state = (kind, value, name)
self.__setstate__(state)
def _setstate(self, state):
self._state = state
if not hasattr(self, '_semlock'):
self._semlock = _processing.SemLock._rebuild(*state)
debug('recreated blocker with handle %r' % state[0])
def __getstate__(self):
if sys.platform != 'win32':
raise NotImplementedError
return self._state
def __setstate__(self, state):
(kind, value, name) = self._state = state
self._initvalue = value
if not hasattr(self, '_block'):
debug('opening blocker with name %r' % name)
self._block = _processing.Blocker(
name=name, create=False, kind=kind, value=value
)
self.acquire = self._semlock.acquire
self.release = self._semlock.release
self.__enter__ = self._semlock.__enter__
self.__exit__ = self._semlock.__exit__
if kind in (MUTEX, RECURSIVE_MUTEX) and sys.platform != 'win32':
# On Unix a semaphore masquerading as a mutex will not be
# automatically released when the process that owns it is
# terminated by `os._exit()`. To be safe we try to make sure
# `self._block._close()` will be called before `os._exit()`.
Finalize(self, self._block._close, exitpriority=-10)
self.acquire = self._block.acquire
self.release = self._block.release
def __enter__(self):
self.acquire()
return self
def __exit__(self, t, v, tb):
self.release()
#

@@ -105,13 +65,13 @@ # Semaphore

class Semaphore(Blocker):
class Semaphore(SemLock):
def __init__(self, value=1):
Blocker.__init__(self, SEMAPHORE, value)
SemLock.__init__(self, SEMAPHORE, value)
def getValue(self):
return self._block._getvalue()
return self._semlock._getvalue()
def __repr__(self):
try:
return '<Semaphore(value=%r)>' % self._block._getvalue()
return '<Semaphore(value=%r)>' % self._semlock._getvalue()
except (KeyboardInterrupt, SystemExit):

@@ -129,3 +89,3 @@ raise

def __init__(self, value=1):
Blocker.__init__(self, BOUNDED_SEMAPHORE, value)
SemLock.__init__(self, BOUNDED_SEMAPHORE, value)

@@ -135,3 +95,3 @@ def __repr__(self):

return '<BoundedSemaphore(value=%r, maxvalue=%r)>' % \
(self._block._getvalue(), self._initvalue)
(self._semlock._getvalue(), self._semlock.maxvalue)
except (KeyboardInterrupt, SystemExit):

@@ -143,13 +103,22 @@ raise

#
# Non-recursive lock -- releasing an unowned lock raises AssertionError
# Non-recursive lock
#
class Lock(Blocker):
class Lock(SemLock):
def __init__(self):
Blocker.__init__(self, MUTEX, 1)
SemLock.__init__(self, BOUNDED_SEMAPHORE, 1)
def __repr__(self):
try:
return '<Lock(ismine=%r)>' % bool(self._block._ismine())
if self._semlock._ismine():
name = currentProcess().getName()
if threading.currentThread().getName() != 'MainThread':
name += '|' + threading.currentThread().getName()
elif self._semlock._getvalue() == 1:
name = 'None'
elif self._semlock._count() > 0:
name = 'SomeOtherThread'
else:
name = 'SomeOtherProcess'
except (KeyboardInterrupt, SystemExit):

@@ -159,2 +128,4 @@ raise

return object.__repr__(self)
else:
return '<Lock(owner=%s)>' % name

@@ -165,17 +136,17 @@ #

class RLock(Blocker):
class RLock(SemLock):
def __init__(self, _name=None):
Blocker.__init__(self, RECURSIVE_MUTEX, 1)
def __init__(self):
SemLock.__init__(self, RECURSIVE_MUTEX, 1)
def __repr__(self):
try:
if self._block._ismine():
if self._semlock._ismine():
name = currentProcess().getName()
if threading.currentThread().getName() != 'MainThread':
name += '|' + threading.currentThread().getName()
return '<RLock(%s, %s)>' % (name, self._block._count())
elif self._block._getvalue() == 1:
return '<RLock(%s, %s)>' % (name, self._semlock._count())
elif self._semlock._getvalue() == 1:
return '<RLock(None, 0)>'
elif self._block._count() > 0:
elif self._semlock._count() > 0:
return '<RLock(SomeOtherThread, nonzero)>'

@@ -187,3 +158,2 @@ else:

except Exception:
raise
return object.__repr__(self)

@@ -195,9 +165,9 @@

class Condition(object):
class Condition(PicklableOnlyForInheritance):
def __init__(self, lock=None):
state = (lock or RLock(), Semaphore(0), Semaphore(0), Semaphore(0))
self.__setstate__(state)
self._setstate(state)
def __setstate__(self, state):
def _setstate(self, state):
(self._lock, self._sleeping_count,

@@ -208,9 +178,6 @@ self._woken_count, self._wait_semaphore) = self._state = state

def __getstate__(self):
return self._state
def __repr__(self):
try:
num_waiters = (self._sleeping_count._block._getvalue() -
self._woken_count._block._getvalue())
num_waiters = (self._sleeping_count._semlock._getvalue() -
self._woken_count._semlock._getvalue())
except (KeyboardInterrupt, SystemExit):

@@ -223,12 +190,10 @@ raise

def wait(self, timeout=None):
assert self._lock._block._ismine(), \
assert self._lock._semlock._ismine(), \
'must acquire() condition before using wait()'
# get number of times the lock has been acquired by this thread
count = self._lock._block._count()
# indicate that this thread will soon be waiting for notification
# indicate that this thread is going to sleep
self._sleeping_count.release()
# release lock
count = self._lock._semlock._count()
for i in xrange(count):

@@ -239,8 +204,5 @@ self._lock.release()

# wait for notification or timeout
if timeout is None:
self._wait_semaphore.acquire()
else:
self._wait_semaphore._block.acquire_timeout(timeout)
self._wait_semaphore.acquire(True, timeout)
finally:
# indicate that this thread is no longer waiting
# indicate that this thread has woken
self._woken_count.release()

@@ -252,20 +214,41 @@

def notify(self, n=1):
assert self._lock._block._ismine(), \
'must acquire() condition before using notify()'
# wake up `count` waiting threads where `count` is the minimum of
# `n` and the value of the `self._sleeping_count` semaphore
# (which gets reduced by `count`)
count = 0
while count < n and self._sleeping_count.acquire(False):
self._wait_semaphore.release()
count += 1
def notify(self):
assert self._lock._semlock._ismine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
for i in xrange(count):
self._woken_count.acquire()
if self._sleeping_count.acquire(False): # try grabbing a sleeper
self._wait_semaphore.release() # wake up one sleeper
self._woken_count.acquire() # wait for the sleeper to wake
# rezero _wait_semaphore in case a timeout just happened
self._wait_semaphore.acquire(False)
def notifyAll(self):
self.notify(sys.maxint)
assert self._lock._semlock._ismine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(False)
# to take account of timeouts since last notify() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res
sleepers = self._sleeping_count.getValue()
if sleepers:
for i in xrange(sleepers):
self._sleeping_count.acquire() # grab a sleeper
self._wait_semaphore.release() # wake up a sleeper
for i in xrange(sleepers):
self._woken_count.acquire() # wait for a sleeper to wake
# rezero wait_semaphore in case some timeouts just happened
while self._wait_semaphore.acquire(False):
pass
def __enter__(self):

@@ -289,3 +272,3 @@ self.acquire()

def isSet(self):
return bool(self._flag._block._getvalue())
return bool(self._flag._semlock._getvalue())

@@ -305,3 +288,2 @@ def set(self):

self._flag.acquire(False)
assert not self.isSet()
finally:

@@ -317,2 +299,1 @@ self._cond.release()

self._cond.release()
Metadata-Version: 1.0
Name: processing
Version: 0.40
Version: 0.50
Summary: Package for using processes which mimics the threading module

@@ -9,5 +9,5 @@ Home-page: http://developer.berlios.de/projects/pyprocessing

License: BSD Licence
Description: ``processing`` is a package for the Python language which supports the
Description: `processing` is a package for the Python language which supports the
spawning of processes using the API of the standard library's
``threading`` module. It runs on both Unix and Windows.
`threading` module. It runs on both Unix and Windows.

@@ -22,6 +22,6 @@ Features:

* Equivalents of all the synchronization primitives in ``threading``
* Equivalents of all the synchronization primitives in `threading`
are available.
* A ``Pool`` class makes it easy to submit tasks to a pool of worker
* A `Pool` class makes it easy to submit tasks to a pool of worker
processes.

@@ -52,3 +52,3 @@

The ``processing.Process`` class follows the API of ``threading.Thread``.
The `processing.Process` class follows the API of `threading.Thread`.
For example ::

@@ -55,0 +55,0 @@

@@ -0,1 +1,2 @@

.. default-role:: literal
.. include:: doc/version.txt

@@ -13,5 +14,5 @@

``processing`` is a package for the Python language which supports the
`processing` is a package for the Python language which supports the
spawning of processes using the API of the standard library's
``threading`` module. It runs on both Unix and Windows.
`threading` module. It runs on both Unix and Windows.

@@ -26,6 +27,6 @@ Features:

* Equivalents of all the synchronization primitives in ``threading``
* Equivalents of all the synchronization primitives in `threading`
are available.
* A ``Pool`` class makes it easy to submit tasks to a pool of worker
* A `Pool` class makes it easy to submit tasks to a pool of worker
processes.

@@ -56,3 +57,3 @@

The ``processing.Process`` class follows the API of ``threading.Thread``.
The `processing.Process` class follows the API of `threading.Thread`.
For example ::

@@ -59,0 +60,0 @@

+108
-59

@@ -7,25 +7,44 @@ #

import sys
import glob
from glob import glob
from distutils.core import setup, Extension
if sys.version_info < (2, 4, 0):
raise ValueError, 'Versions of Python before 2.4 are not supported'
#
# Function to check for features using sysconf
# Macros and libraries
#
def have_feature(name):
try:
return int(os.sysconf(name) not in (0, -1))
except (ValueError, OSError):
return 0
# The `macros` dict determines the macros that will be defined when
# the C extension is compiled. Each value should be either 0 or 1.
# (An undefined macro is assumed to have value 0.) `macros` is only
# used on Unix platforms.
#
# Basic configuration
# The `libraries` dict determines the libraries to which the C
# extension will be linked. This should probably be either `['rt']`
# if you need `librt` or else `[]`.
#
# The `macros` dict determines the macros that will be defined when the
# C extension is compiled. Each value should be either 0 or 1.
# Meaning of macros
#
# The `libraries` dict determines the libraries to which the C
# extension will be linked. This should probably be either `['rt']`
# if you need `librt` or else `[]`.
# HAVE_SEM_OPEN
# Set this to 1 if you have `sem_open()`. This enables the use of
# posix named semaphores which are necessary for the
# implementation of the synchronization primitives on Unix. If
# set to 0 then the only way to create synchronization primitives
# will be via a manager (e.g. "m = Manager(); lock = m.Lock()").
#
# HAVE_SEM_TIMEDWAIT
# Set this to 1 if you have `sem_timedwait()`. Otherwise polling
# will be necessary when waiting on a semaphore using a timeout.
#
# HAVE_FD_TRANSFER
# Set this to 1 to compile functions for transferring file
# descriptors between processes over an AF_UNIX socket using a
# control message with type SCM_RIGHTS. On Unix the pickling of
# of socket and connection objects depends on this feature.
#
# HAVE_BROKEN_SEM_UNLINK
# Set to 1 if `sem_unlink()` is unnecessary. For some reason this
# seems to be the case on Cygwin where `sem_unlink()` is missing
# from semaphore.h.
#

@@ -35,30 +54,43 @@

macros = dict()
libraries = ['Ws2_32']
libraries = ['ws2_32']
elif sys.platform == 'cygwin':
elif sys.platform == 'darwin':
macros = dict(
USE_POSIX_SEMAPHORE=1,
NO_SEM_UNLINK=1, # `sem_unlink()` is missing from <semaphore.h>
NO_SENDFD=1 # cannot send file descriptors over sockets
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=0,
HAVE_FD_TRANSFER=1
)
libraries = [] # we don't have/need librt
elif sys.platform == 'darwin':
libraries = []
elif sys.platform == 'cygwin':
macros = dict(
USE_POSIX_SEMAPHORE=1,
NO_SEM_TIMED=1 # we don't have sem_timedwait()
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=0,
HAVE_BROKEN_SEM_UNLINK=1
)
libraries = [] # we don't have/need librt
libraries = []
else:
macros = dict(
# should we include support for posix semaphores?
USE_POSIX_SEMAPHORE=have_feature('SC_SEMAPHORES'),
# does semaphore support lack sem_timedwait()?
NO_SEM_TIMED=0,
HAVE_SEM_OPEN=1,
HAVE_SEM_TIMEDWAIT=1,
HAVE_FD_TRANSFER=1
)
# linux needs librt - other unices may not
libraries = ['rt']
#macros['Py_DEBUG'] = 1
#
# Print configuration info
#
print 'Macros:'
for name, value in sorted(macros.iteritems()):
print '\t%s = %r' % (name, value)
print '\nLibraries:\n\t%r\n' % libraries
#
# Compilation of `_processing` extension

@@ -69,6 +101,7 @@ #

sources = [
'src/win_processing.c',
'src/win_semaphore.c',
'src/processing.c',
'src/semaphore.c',
'src/pipe_connection.c',
'src/socket_connection.c'
'src/socket_connection.c',
'src/win_functions.c'
]

@@ -78,8 +111,8 @@

sources = [
'src/posix_processing.c',
'src/processing.c',
'src/socket_connection.c'
]
if macros.get('USE_POSIX_SEMAPHORE', False):
sources.append('src/posix_semaphore.c')
if macros.get('HAVE_SEM_OPEN', False):
sources.append('src/semaphore.c')

@@ -92,3 +125,3 @@ ext_modules = [

define_macros=macros.items(),
depends=glob('src/*.h')
depends=glob.glob('src/*.h') + ['setup.py']
)

@@ -98,13 +131,2 @@ ]

#
# Print configuration info
#
print 'Macros:'
for name, value in macros.items():
print '\t%s = %r' % (name, value)
print '\nLibraries:\n\t%r\n' % libraries
#
# Get version number

@@ -115,3 +137,3 @@ #

if line.startswith('__version__'):
version = eval(line.split()[-1].strip())
version = line.split()[-1].strip("'").strip('"')
break

@@ -137,14 +159,28 @@ else:

packages = ['processing', 'processing.dummy']
package_dir={'processing': 'lib'}
package_data = {}
packages = [
'processing',
'processing.dummy',
]
INCLUDE_EXTRA = True
package_dir = {
'processing': 'lib',
'processing.doc': 'doc',
'processing.tests': 'tests',
'processing.examples': 'examples'
}
if INCLUDE_EXTRA:
# include test files and html documentation in package directory
packages.extend(['processing.test','processing.doc'])
package_dir.update({'processing.test': 'test', 'processing.doc': 'doc'})
package_data['processing.doc'] = ['*.html', '*.css', '../*.html']
package_data = {
'processing.doc': ['*.html', '*.css', '../*.html']
}
INSTALL_EXTRA = True
if INSTALL_EXTRA:
# install test files and html documentation
packages.extend([
'processing.tests',
'processing.examples',
'processing.doc'
])
#

@@ -175,1 +211,14 @@ # Setup

)
#
# Check for ctypes
#
try:
import ctypes
except ImportError:
print >>sys.stderr, '''
WARNING: ctypes is not available which means that the use of shared
memory for storing data will not be supported. (ctypes is not
included with Python 2.4, but can be intsalled separately.)
'''
/*
* Definition of a `Connection` type.
* Used by `socket_connection.h` and `pipe_connection.h`.
* Used by `socket_connection.c` and `pipe_connection.c`.
*
* connection.h
*
* Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#ifndef _CONNECTION_H
#define _CONNECTION_H
#ifndef CONNECTION_H
#define CONNECTION_H
#define BUFFER_SIZE 1024
/*
* Allocation and deallocation
*/
#define CHECKHANDLE(self) \
if (self->handle == INVALID_HANDLE) { \
PyErr_SetString(PyExc_OSError, "handle is invalid"); \
return NULL; \
static PyObject *
Connection_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
Connection *self;
HANDLE handle;
BOOL make_duplicate = TRUE, make_inheritable = TRUE;
static char *kwlist[] = {"handle", "duplicate", "inheritable", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwds, F_HANDLE "|ii", kwlist,
&handle, &make_duplicate, &make_inheritable))
return NULL;
if (handle == INVALID_HANDLE_VALUE || (Py_ssize_t)handle < 0) {
PyErr_Format(PyExc_IOError, "invalid handle %" PY_FORMAT_SIZE_T "d",
(Py_ssize_t)handle);
return NULL;
}
self = (Connection*)type->tp_alloc(type, 0);
if (self == NULL)
return NULL;
typedef struct {
PyObject_HEAD
_HANDLE handle;
char buffer[BUFFER_SIZE];
} Connection;
self->weakreflist = NULL;
PyTypeObject CONNECTION_TYPE;
if (make_duplicate)
self->handle = DUPLICATE(handle);
else
self->handle = handle;
if (self->handle == INVALID_HANDLE_VALUE) {
self->ob_type->tp_free((PyObject*)self);
return SetException(PyExc_IOError, STANDARD_ERROR);
}
#ifdef MS_WINDOWS
if (make_inheritable) {
if (!SetHandleInformation(self->handle, HANDLE_FLAG_INHERIT,
HANDLE_FLAG_INHERIT))
{
PyErr_SetExcFromWindowsErr(PyExc_IOError, 0);
Py_BEGIN_ALLOW_THREADS
CloseHandle(self->handle);
Py_END_ALLOW_THREADS
SetLastError(0);
self->ob_type->tp_free((PyObject*)self);
return NULL;
}
}
#endif
return (PyObject*)self;
}
static void
Connection_dealloc(Connection* self)
{
if (self->weakreflist != NULL)
PyObject_ClearWeakRefs((PyObject*)self);
if (self->handle != INVALID_HANDLE_VALUE) {
Py_BEGIN_ALLOW_THREADS
CLOSE(self->handle);
Py_END_ALLOW_THREADS
}
self->ob_type->tp_free((PyObject*)self);
}
/*

@@ -37,7 +92,5 @@ * Functions for transferring buffers

char *buffer;
Py_ssize_t length;
int res;
Py_ssize_t length;
CHECKHANDLE(self);
if (!PyArg_ParseTuple(args, "s#", &buffer, &length))

@@ -47,7 +100,7 @@ return NULL;

Py_BEGIN_ALLOW_THREADS
res = send_string(self->handle, buffer, length);
res = conn_send_string(self, buffer, length);
Py_END_ALLOW_THREADS
if (res < 0)
return SetExcFromNumber(res);
return SetException(PyExc_IOError, res);

@@ -64,16 +117,14 @@ Py_RETURN_NONE;

CHECKHANDLE(self);
Py_BEGIN_ALLOW_THREADS
nbytes = recv_string(self->handle, self->buffer, BUFFER_SIZE, &freeme);
nbytes = conn_recv_string(self, self->buffer, BUFFER_SIZE, &freeme);
Py_END_ALLOW_THREADS
if (nbytes < 0) {
SetExcFromNumber(nbytes);
SetException(PyExc_IOError, nbytes);
} else {
if (freeme == NULL) {
result = Py_BuildValue("s#", self->buffer, nbytes);
result = PyString_FromStringAndSize(self->buffer, nbytes);
} else {
result = Py_BuildValue("s#", freeme, nbytes);
free(freeme);
result = PyString_FromStringAndSize(freeme, nbytes);
PyMem_Free(freeme);
}

@@ -89,9 +140,6 @@ }

char *freeme = NULL, *buffer = NULL;
Py_ssize_t nbytes, length;
int offset=0;
Py_ssize_t nbytes, length, offset=0;
PyObject *result = NULL;
CHECKHANDLE(self);
if (!PyArg_ParseTuple(args, "w#|i", &buffer, &length, &offset))
if (!PyArg_ParseTuple(args, "w#|" F_PY_SSIZE_T, &buffer, &length, &offset))
return NULL;

@@ -107,17 +155,17 @@

return NULL;
}
}
Py_BEGIN_ALLOW_THREADS
nbytes = recv_string(self->handle, buffer+offset, length-offset, &freeme);
nbytes = conn_recv_string(self, buffer+offset, length-offset, &freeme);
Py_END_ALLOW_THREADS
if (nbytes < 0) {
SetExcFromNumber(nbytes);
SetException(PyExc_IOError, nbytes);
} else {
if (freeme == NULL) {
result = Py_BuildValue("i", nbytes);
result = PyInt_FromSsize_t(nbytes);
} else {
result = PyObject_CallFunction(BufferTooShort,
"s#", freeme, nbytes);
free(freeme);
PyMem_Free(freeme);
PyErr_SetObject(BufferTooShort, result);

@@ -137,3 +185,3 @@ Py_XDECREF(result);

static PyObject *
Connection_send_obj(Connection *self, PyObject *args)
Connection_send_obj(Connection *self, PyObject *obj)
{

@@ -143,28 +191,26 @@ char *buffer;

Py_ssize_t length;
PyObject *obj = NULL, *pickled_string = NULL;
PyObject *pickled_string = NULL;
CHECKHANDLE(self);
pickled_string = PyObject_CallFunctionObjArgs(dumpsFunction, obj,
protocol, NULL);
if (!PyArg_ParseTuple(args, "O", &obj))
goto ERR;
pickled_string = PyObject_CallFunction(dumpsFunction, "Oi", obj, 2);
if (!pickled_string)
goto ERR;
goto failure;
if (PyString_AsStringAndSize(pickled_string, &buffer, &length) != 0)
goto ERR;
goto failure;
if (length > 0x7fffffff) {
if (TOO_LONG(length)) {
PyErr_SetString(PyExc_ValueError, "string too long");
goto ERR;
goto failure;
}
Py_BEGIN_ALLOW_THREADS
res = send_string(self->handle, buffer, (int)length);
res = conn_send_string(self, buffer, (int)length);
Py_END_ALLOW_THREADS
if (res < 0)
return SetExcFromNumber(res);
if (res < 0) {
SetException(PyExc_IOError, res);
goto failure;
}

@@ -174,3 +220,3 @@ Py_XDECREF(pickled_string);

ERR:
failure:
Py_XDECREF(pickled_string);

@@ -187,10 +233,8 @@ return NULL;

CHECKHANDLE(self);
Py_BEGIN_ALLOW_THREADS
nbytes = recv_string(self->handle, self->buffer, BUFFER_SIZE, &freeme);
nbytes = conn_recv_string(self, self->buffer, BUFFER_SIZE, &freeme);
Py_END_ALLOW_THREADS
if (nbytes < 0) {
SetExcFromNumber(nbytes);
SetException(PyExc_IOError, nbytes);
} else {

@@ -203,3 +247,3 @@ if (freeme == NULL) {

freeme, nbytes);
free(freeme);
PyMem_Free(freeme);
}

@@ -216,30 +260,34 @@ }

static PyObject *
Connection_fileno(Connection* self)
{
CHECKHANDLE(self);
return Py_BuildValue("i", self->handle);
}
static PyObject *
Connection_poll(Connection *self, PyObject *args)
{
PyObject *timeout_obj = NULL;
double timeout = 0.0;
int res;
CHECKHANDLE(self);
if (! PyArg_ParseTuple(args, "|d", &timeout))
if (!PyArg_ParseTuple(args, "|O", &timeout_obj))
return NULL;
if (timeout_obj == NULL) {
timeout = 0.0;
} else if (timeout_obj == Py_None) {
timeout = -1.0; /* block forever */
} else {
timeout = PyFloat_AsDouble(timeout_obj);
if (PyErr_Occurred())
return NULL;
if (timeout < 0.0)
timeout = 0.0;
}
Py_BEGIN_ALLOW_THREADS
res = poll(self->handle, timeout);
res = conn_poll(self, timeout);
Py_END_ALLOW_THREADS
if (res == TRUE) {
switch (res) {
case TRUE:
Py_RETURN_TRUE;
} else if (res == FALSE) {
case FALSE:
Py_RETURN_FALSE;
} else {
return SetExcFromNumber(res);
default:
return SetException(PyExc_IOError, res);
}

@@ -249,9 +297,19 @@ }

static PyObject *
Connection_close(Connection* self)
Connection_fileno(Connection* self)
{
if (self->handle != INVALID_HANDLE) {
if (self->handle == INVALID_HANDLE_VALUE) {
PyErr_SetString(PyExc_IOError, "handle is invalid");
return NULL;
}
return PyInt_FromLong((long)self->handle);
}
static PyObject *
Connection_close(Connection *self)
{
if (self->handle != INVALID_HANDLE_VALUE) {
Py_BEGIN_ALLOW_THREADS
_close(self->handle);
CLOSE(self->handle);
Py_END_ALLOW_THREADS
self->handle = INVALID_HANDLE;
self->handle = INVALID_HANDLE_VALUE;
}

@@ -262,50 +320,29 @@

static void
Connection_dealloc(Connection* self)
static PyObject *
Connection_repr(Connection *self)
{
Py_XDECREF(Connection_close(self));
self->ob_type->tp_free((PyObject*)self);
return PyString_FromFormat("%s(handle=%" PY_FORMAT_SIZE_T "d)",
CONNECTION_NAME, (Py_ssize_t)self->handle);
}
/*
* Getters and setters
*/
static PyObject *
Connection_new(PyTypeObject *type, PyObject *args)
Connection_closed(Connection *self, void *closure)
{
Connection *self;
_HANDLE handle;
self = (Connection*)type->tp_alloc(type, 0);
if (self == NULL)
return NULL;
if (! PyArg_ParseTuple(args, "i", &handle)) {
Py_DECREF(self);
return NULL;
}
self->handle = _duplicate(handle);
if (self->handle < 0) {
self->ob_type->tp_free((PyObject*)self);
return SetExcFromNumber(-1);
}
return (PyObject*)self;
return PyBool_FromLong(self->handle == INVALID_HANDLE_VALUE);
}
/*
* Method table
*/
static PyMethodDef Connection_methods[] = {
{"close", (PyCFunction)Connection_close, METH_NOARGS,
"close the connection"},
{"fileno", (PyCFunction)Connection_fileno, METH_NOARGS,
"file descriptor or handle of the connection"},
{"recv", (PyCFunction)Connection_recv_obj, METH_NOARGS,
"receive a (picklable) object"},
{"send", (PyCFunction)Connection_send_obj, METH_VARARGS,
"send a (picklable) object"},
{"sendbytes", (PyCFunction)Connection_sendbytes, METH_VARARGS,
"send the byte data from a readable buffer-like object"},
{"recvbytes", (PyCFunction)Connection_recvbytes, METH_NOARGS,
"receive byte data as a string"},
{"sendbytes", (PyCFunction)Connection_sendbytes, METH_VARARGS,
"send the byte data from a readable buffer-like (such as a string)"},
{"recvbytes_into", (PyCFunction)Connection_recvbytes_into, METH_VARARGS,

@@ -315,4 +352,13 @@ "receive byte data into a writeable buffer-like object\n"

{"send", (PyCFunction)Connection_send_obj, METH_O,
"send a (picklable) object"},
{"recv", (PyCFunction)Connection_recv_obj, METH_NOARGS,
"receive a (picklable) object"},
{"poll", (PyCFunction)Connection_poll, METH_VARARGS,
"whether there is any input available to be read"},
{"fileno", (PyCFunction)Connection_fileno, METH_NOARGS,
"file descriptor or handle of the connection"},
{"close", (PyCFunction)Connection_close, METH_NOARGS,
"close the connection"},

@@ -322,50 +368,68 @@ {NULL} /* Sentinel */

/*
* Member table
*/
/*
 * Getset table: exposes the read-only `closed` property
 */
static PyGetSetDef Connection_getsetters[] = {
    {"closed", (getter)Connection_closed, NULL,
     "True if the connection is closed", NULL},
    {NULL}  /* sentinel */
};
/*
* Connection type
*/
PyTypeObject CONNECTION_TYPE = {
PyObject_HEAD_INIT(NULL)
0, /*ob_size*/
CONNECTION_NAME, /*tp_name*/
sizeof(Connection), /*tp_basicsize*/
0, /*tp_itemsize*/
0, /* ob_size */
"_processing." CONNECTION_NAME,
/* tp_name */
sizeof(Connection), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)Connection_dealloc,
/*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_compare*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
/*tp_flags*/
"Connection type.\n"
"The constructor takes an fd/handle as its argument.\n"
"The instance uses a duplicated copy of the fd/handle.",
/*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
Connection_methods, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
(newfunc)Connection_new, /*tp_new*/
/* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_compare */
(reprfunc)Connection_repr, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_WEAKREFS,
/* tp_flags */
"Connection type whose constructor signature is\n\n"
" Connection(handle, duplicate=True, inheritable=True).\n\n"
"If duplicate is true then the connection uses a copy of handle;\n"
"otherwise the connection claims ownership of the handle.\n"
"On Windows inheritable determines whether the handle is made\n"
"inheritable; on Unix it is ignored.",
/* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
offsetof(Connection, weakreflist),
/* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
Connection_methods, /* tp_methods */
0, /* tp_members */
Connection_getsetters, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
(newfunc)Connection_new, /* tp_new */
};
#endif /* _CONNECTION_H */
#endif /* CONNECTION_H */

@@ -6,15 +6,148 @@ /*

*
* Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "processing_defs.h"
#include "structmember.h"
#include "processing.h"
extern PyObject *dumpsFunction, *loadsFunction;
extern PyObject *BufferTooShort;
#define CLOSE(h) CloseHandle(h)
#define DUPLICATE(h) duplicate_handle(h)
#define CONNECTION_NAME "_processing.PipeConnection"
/*
* Connection struct
*/
/* bytes received inline before spilling to a freshly allocated buffer */
#define BUFFER_SIZE 1024

typedef struct {
    PyObject_HEAD
    HANDLE handle;               /* pipe handle; closed by Connection_close */
    PyObject *weakreflist;       /* weak reference support */
    char buffer[BUFFER_SIZE];    /* scratch buffer for small messages */
} Connection;
/*
* Send string to the pipe; assumes in message oriented mode
*/
/*
 * Write `length` bytes as a single message (pipe is in message mode).
 * Returns SUCCESS or STANDARD_ERROR.
 */
static Py_ssize_t
conn_send_string(Connection *conn, char *string, size_t length)
{
    DWORD amount_written;

    /* NOTE(review): `length` (size_t) is narrowed to WriteFile's DWORD
       parameter; callers appear to bound message size -- confirm */
    if (!WriteFile(conn->handle, string, length, &amount_written, NULL))
        return STANDARD_ERROR;
    /* assert(length == amount_written); */
    return SUCCESS;
}
/*
* Attempts to read into buffer, or if buffer too small into *newbuffer.
*
* Returns number of bytes read. Assumes in message oriented mode.
*/
static Py_ssize_t
conn_recv_string(Connection *conn, char *buffer,
                 size_t buflength, char **newbuffer)
{
    DWORD left, length, full_length, err;

    *newbuffer = NULL;
    /* fast path: the whole message fits in the caller's buffer */
    if (ReadFile(conn->handle, buffer, buflength, &length, NULL))
        return length;
    err = GetLastError();
    if (err != ERROR_MORE_DATA) {
        /* a broken pipe means the writer closed its end */
        if (err == ERROR_BROKEN_PIPE)
            return END_OF_FILE;
        return STANDARD_ERROR;
    }
    /* ERROR_MORE_DATA: ask how many message bytes are still pending */
    if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, NULL, &left))
        return STANDARD_ERROR;
    full_length = length + left;
    if (TOO_LONG(full_length))
        return BAD_MESSAGE_LENGTH;
    /* on success the caller owns *newbuffer */
    *newbuffer = PyMem_Malloc(full_length);
    if (*newbuffer == NULL)
        return MEMORY_ERROR;
    /* copy the partial read, then fetch the remainder of the message */
    memcpy(*newbuffer, buffer, length);
    if (ReadFile(conn->handle, *newbuffer+length, left, &length, NULL)) {
        assert(length == left);
        return full_length;
    } else {
        PyMem_Free(*newbuffer);
        return STANDARD_ERROR;
    }
}
/*
* Check whether any data is available for reading
*/
/* callers have released the GIL; `_save` is the saved thread state */
#define conn_poll(conn, timeout) conn_poll_save(conn, timeout, _save)

static int
conn_poll_save(Connection *conn, double timeout, PyThreadState *_save)
{
    DWORD bytes, deadline, delay;
    int difference, res;
    BOOL block = FALSE;

    if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))
        return STANDARD_ERROR;
    if (timeout == 0.0)
        return bytes > 0;        /* pure poll: no waiting */
    if (timeout < 0.0)
        block = TRUE;            /* negative timeout: wait forever */
    else
        /* XXX does not check for overflow */
        deadline = GetTickCount() + (DWORD)(1000 * timeout + 0.5);
    Sleep(0);
    /* poll with a gradually growing delay, capped at 20ms per nap */
    for (delay = 1 ; ; delay += 1) {
        if (!PeekNamedPipe(conn->handle, NULL, 0, NULL, &bytes, NULL))
            return STANDARD_ERROR;
        else if (bytes > 0)
            return TRUE;
        if (!block) {
            difference = deadline - GetTickCount();
            if (difference < 0)
                return FALSE;    /* deadline passed: timed out */
            if ((int)delay > difference)
                delay = difference;
        }
        if (delay > 20)
            delay = 20;
        Sleep(delay);
        /* briefly retake the GIL so pending signals (Ctrl-C) fire */
        Py_BLOCK_THREADS
        res = PyErr_CheckSignals();
        Py_UNBLOCK_THREADS
        if (res)
            return EXCEPTION_HAS_BEEN_SET;
    }
}
/*
* "connection.h" defines the PipeConnection type using the definitions above
*/
#define CONNECTION_NAME "PipeConnection"
#define CONNECTION_TYPE PipeConnectionType
#include "pipe_defs.h"
#include "connection.h"
#ifndef PROCESSING_H
#define PROCESSING_H
/*
 * rwbuffer(object [, offset [, size]]) -> read-write buffer over `object`
 */
static PyObject*
processing_rwbuffer(PyObject *self, PyObject *args)
{
    PyObject *obj;
    Py_ssize_t offset = 0, size = Py_END_OF_BUFFER;

    if (!PyArg_ParseTuple(args, "O|" N_FMT N_FMT, &obj, &offset, &size))
        return NULL;
    /* NOTE(review): an in-function #define of PY_SSIZE_T_CLEAN has no
       effect here (it must precede Python.h); looks like merge residue --
       confirm against upstream */
    #define PY_SSIZE_T_CLEAN
    return PyBuffer_FromReadWriteObject(obj, offset, size);
}
#include "Python.h"
#include "structmember.h"
#include "pythread.h"
/*
 * address_of_buffer(obj) -> (address, size) of obj's writable buffer
 */
static PyObject*
processing_address_of_buffer(PyObject *self, PyObject *obj)
{
    void *buffer;
    Py_ssize_t buffer_len;

    /* obtain a writable buffer pointer from any buffer-supporting object */
    if (PyObject_AsWriteBuffer(obj, &buffer, &buffer_len) < 0)
        return NULL;
    return Py_BuildValue(N_FMT N_FMT, buffer, buffer_len);
}
#ifdef MS_WINDOWS
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
# include <winsock2.h>
# include <process.h>
# define SEM_HANDLE HANDLE
HANDLE duplicate_handle(HANDLE h);
#else
# include <unistd.h>
# include <sys/socket.h>
# include <arpa/inet.h>
# if HAVE_SEM_OPEN
# include <semaphore.h>
# include <fcntl.h>
typedef sem_t *SEM_HANDLE;
# endif
# define HANDLE int
# define SOCKET int
# define BOOL int
# define UINT32 uint32_t
# define INT32 int32_t
# define TRUE 1
# define FALSE 0
# define INVALID_HANDLE_VALUE (-1)
#endif
/*
* Make sure Py_ssize_t available
*/
#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
typedef int Py_ssize_t;
# define PY_SSIZE_T_MAX INT_MAX
# define PY_SSIZE_T_MIN INT_MIN
# define F_PY_SSIZE_T "i"
# define PY_FORMAT_SIZE_T ""
# define PyInt_FromSsize_t(n) PyInt_FromLong((long)n)
#else
# define F_PY_SSIZE_T "n"
#endif
/*
* Format codes
*/
#if SIZEOF_VOID_P == SIZEOF_LONG
# define F_POINTER "k"
# define T_POINTER T_ULONG
#elif defined(HAVE_LONG_LONG) && (SIZEOF_VOID_P == SIZEOF_LONG_LONG)
# define F_POINTER "K"
# define T_POINTER T_ULONGLONG
#else
# error "can't find format code for unsigned integer of same size as void*"
#endif
#ifdef MS_WINDOWS
# define F_HANDLE F_POINTER
# define T_HANDLE T_POINTER
# define F_SEM_HANDLE F_HANDLE
# define T_SEM_HANDLE T_HANDLE
# define F_DWORD "k"
# define T_DWORD T_ULONG
#else
# define F_HANDLE "i"
# define T_HANDLE T_INT
# define F_SEM_HANDLE F_POINTER
# define T_SEM_HANDLE T_POINTER
#endif
/*
* Message length limited to 2**31-1
*/
#define TOO_LONG(n) ((UINT32)n >= 0x7fffffff)
/*
* Error codes which can be returned by functions called without GIL
*/
#define SUCCESS (0)
#define STANDARD_ERROR (-1)
#define MEMORY_ERROR (-1001)
#define END_OF_FILE (-1002)
#define EARLY_END_OF_FILE (-1003)
#define BAD_MESSAGE_LENGTH (-1004)
#define WSA_ERROR (-1005)
#define EXCEPTION_HAS_BEEN_SET (-1006)
PyObject *SetException(PyObject *Type, int num);
/*
* Externs
*/
extern PyObject *dumpsFunction;
extern PyObject *loadsFunction;
extern PyObject *protocol;
extern PyObject *BufferTooShort;
extern PyTypeObject SemLockType;
extern PyTypeObject ConnectionType;
#ifdef MS_WINDOWS
extern PyTypeObject PipeConnectionType;
extern HANDLE hInterruptEvent;
extern long main_thread;
#endif
#endif /* PROCESSING_H */

@@ -6,60 +6,196 @@ /*

*
* Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
* Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
*/
#include "processing_defs.h"
#include "processing.h"
extern PyObject *dumpsFunction, *loadsFunction;
extern PyObject *BufferTooShort;
#ifdef MS_WINDOWS
# define WRITE(h, buffer, length) send((SOCKET)h, buffer, length, 0)
# define READ(h, buffer, length) recv((SOCKET)h, buffer, length, 0)
# define CLOSE(h) CloseHandle(h)
# define DUPLICATE(h) duplicate_handle(h)
#else
# define WRITE(h, buffer, length) write(h, buffer, length)
# define READ(h, buffer, length) read(h, buffer, length)
# define CLOSE(h) close(h)
# define DUPLICATE(h) dup(h)
#endif
#define CONNECTION_NAME "_processing.Connection"
#define CONNECTION_TYPE SocketConnectionType
/*
* Connection struct
*/
#include "socket_defs.h"
#include "connection.h"
#define BUFFER_SIZE 1024
#ifdef MS_WINDOWS
typedef struct {
PyObject_HEAD
SOCKET sock_fd;
int sock_family;
int sock_type;
int sock_proto;
PyObject *(*errorhandler)(void);
double sock_timeout;
} PySocketSockObject;
PyObject_HEAD
HANDLE handle;
PyObject *weakreflist;
char buffer[BUFFER_SIZE];
} Connection;
extern PyTypeObject *socketType;
/*
* Send string to file descriptor
*/
PyObject *
socket_changefd(PyObject *self, PyObject *args)
static Py_ssize_t
_conn_sendall(HANDLE h, char *string, size_t length)
{
PySocketSockObject *s;
int family, type, proto=0;
SOCKET fd, newfd;
char *p = string;
Py_ssize_t res;
while (length > 0) {
res = WRITE(h, p, length);
if (res < 0)
return STANDARD_ERROR;
length -= res;
p += res;
}
return SUCCESS;
}
if (!PyArg_ParseTuple(args, "Oiii|i", &s, &fd, &family, &type, &proto))
return NULL;
/*
* Receive string of exact length from file descriptor
*/
newfd = _duplicate(fd);
if (newfd == INVALID_SOCKET) {
PyErr_SetString(PyExc_OSError, "failed to duplicate socket handle");
return NULL;
static Py_ssize_t
_conn_recvall(HANDLE h, char *buffer, size_t length)
{
size_t remaining = length;
Py_ssize_t temp;
char *p = buffer;
while (remaining > 0) {
temp = READ(h, p, remaining);
if (temp <= 0) {
if (temp == 0)
return remaining == length ? END_OF_FILE : EARLY_END_OF_FILE;
else
return temp;
}
remaining -= temp;
p += temp;
}
if (s->sock_fd != INVALID_SOCKET) {
Py_BEGIN_ALLOW_THREADS
closesocket(s->sock_fd);
Py_END_ALLOW_THREADS
return SUCCESS;
}
/*
* Send a string prepended by the string length in network byte order
*/
static Py_ssize_t
conn_send_string(Connection *conn, char *string, size_t length)
{
    /* The "header" of the message is a 32 bit unsigned number (in
       network order) which signifies the length of the "body".  If
       the message is shorter than about 16kb then it is quicker to
       combine the "header" and the "body" of the message and send
       them at once. */
    if (length < (16*1024)) {
        char *message;
        int res;

        message = PyMem_Malloc(length+4);
        if (message == NULL)
            return MEMORY_ERROR;
        /* 4-byte big-endian length prefix, then the payload */
        *(UINT32*)message = htonl((UINT32)length);
        memcpy(message+4, string, length);
        res = _conn_sendall(conn->handle, message, length+4);
        PyMem_Free(message);
        return res;
    } else {
        UINT32 lenbuff;

        if (TOO_LONG(length))
            return BAD_MESSAGE_LENGTH;
        lenbuff = htonl((UINT32)length);
        /* header then body; the || short-circuits on the first failure.
           NOTE(review): || collapses STANDARD_ERROR (-1) into 1 --
           confirm callers treat any nonzero result as failure */
        return _conn_sendall(conn->handle, (char*)&lenbuff, 4) ||
               _conn_sendall(conn->handle, string, length);
    }
}
s->sock_fd = newfd;
s->sock_family = family;
s->sock_type = type;
s->sock_proto = proto;
/*
* Attempts to read into buffer, or failing that into *newbuffer
*
* Returns number of bytes read.
*/
static Py_ssize_t
conn_recv_string(Connection *conn, char *buffer,
                 size_t buflength, char **newbuffer)
{
    int res;
    UINT32 ulength;
    /* NOTE(review): this Py_RETURN_NONE returns the Py_None pointer as a
       byte count and makes the rest unreachable -- almost certainly stray
       merge residue from another function; confirm against upstream */
    Py_RETURN_NONE;
    *newbuffer = NULL;
    /* read the 4-byte big-endian length header */
    res = _conn_recvall(conn->handle, (char*)&ulength, 4);
    if (res < 0)
        return res;
    ulength = ntohl(ulength);
    if (TOO_LONG(ulength))
        return BAD_MESSAGE_LENGTH;
    if (ulength <= buflength) {
        /* message fits in the caller-supplied buffer */
        res = _conn_recvall(conn->handle, buffer, (size_t)ulength);
        return res < 0 ? res : ulength;
    } else {
        /* allocate a bigger buffer; caller owns *newbuffer on success */
        *newbuffer = PyMem_Malloc((size_t)ulength);
        if (*newbuffer == NULL)
            return MEMORY_ERROR;
        res = _conn_recvall(conn->handle, *newbuffer, (size_t)ulength);
        return res < 0 ? (Py_ssize_t)res : (Py_ssize_t)ulength;
    }
}
/*
* Check whether any data is available for reading -- neg timeout blocks
*/
static int
conn_poll(Connection *conn, double timeout)
{
    int res;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET((SOCKET)conn->handle, &rfds);

    if (timeout < 0.0) {
        /* negative timeout: block until readable */
        res = select((int)conn->handle+1, &rfds, NULL, NULL, NULL);
    } else {
        struct timeval tv;

        tv.tv_sec = (long)timeout;
        tv.tv_usec = (long)((timeout - tv.tv_sec) * 1e6 + 0.5);
        res = select((int)conn->handle+1, &rfds, NULL, NULL, &tv);
    }

    /* each #ifdef arm opens an `if` that the shared `} else if` closes */
#ifdef MS_WINDOWS
    if (res == SOCKET_ERROR) {
        return WSA_ERROR;
#else
    if (res < 0) {
        return STANDARD_ERROR;
#endif
    } else if (FD_ISSET(conn->handle, &rfds)) {
        return TRUE;
    } else if (res == 0) {
        return FALSE;
    } else {
        return -2000; /* should not get here */
    }
}
/*
* "connection.h" defines the Connection type using the definitions above
*/
#define CONNECTION_NAME "Connection"
#define CONNECTION_TYPE ConnectionType
#include "connection.h"

@@ -7,7 +7,8 @@ ========

Alexey Akimov, Michele Bertoldi, Josiah Carlson, Tim Couper,
Lisandro Dalcin, Markus Gritsch, Doug Hellmann, Charlie Hull,
Richard Jones, Alexy Khrabrov, Gerald John M. Manipon, Kevin
Manley, Paul Rudin, Sandro Tosi, Dominique Wahli.
Alexey Akimov, Michele Bertoldi, Josiah Carlson, C Cazabon,
Tim Couper, Lisandro Dalcin, Markus Gritsch, Doug Hellmann,
Mikael Hogqvist, Charlie Hull, Richard Jones, Alexy Khrabrov,
Gerald Manipon, Kevin Manley, Skip Montanaro, Robert Morgan,
Paul Rudin, Sandro Tosi, Dominique Wahli, Corey Wright.
Sorry if I have forgotten anyone.
#
# A kludge used by `processing` package because Windows lacks `os.fork()`
#
# processing/fork.py
#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
#
# The `Process.start()` method spawns a new python interpreter which
# runs the `run()` function of this module. The commandline arguments
# passed contain `self`, and some other information (encoded using
# `cPickle` and the `hex` codec).
#
__all__ = []
import os
import sys
import imp
import cPickle
import StringIO
import encodings.hex_codec # hint to freeze tools that we need hex codec
import signal
import processing
from os.path import dirname, splitext, basename, abspath
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
_dir = os.getcwd()
pyexts = ('.py', '.pyc', '.pyo', '.pyw')
def equal(path1, path2):
    '''
    Test whether the paths refer to (more or less) the same module file
    '''
    first = abspath(path1)
    second = abspath(path2)
    if first == second:
        return True
    root1, ext1 = splitext(first)
    root2, ext2 = splitext(second)
    if root1 != root2:
        return False
    # same stem: treat .py/.pyc/.pyo/.pyw variants as the same file
    return ext1 in pyexts and ext2 in pyexts
def is_forking(argv):
    '''
    Return whether the commandline marks this process as a fork child
    '''
    forking = len(argv) >= 2 and argv[1] == '--processing-fork'
    if forking:
        # the fork flag must be the one and only argument
        assert len(argv) == 2
    return forking
def freezeSupport():
    '''
    Run the child process bootstrap and exit when this is a fork child
    '''
    if not is_forking(sys.argv):
        return
    main()
    sys.exit()
def get_commandline():
    '''
    Returns the commandline used for spawning a child interpreter process
    '''
    # Refuse to spawn before the current process finished bootstrapping;
    # on Windows this catches the missing __main__ guard idiom.
    if processing.currentProcess()._identity == () and is_forking(sys.argv):
        raise RuntimeError, '''
Attempt to start a new process before the current process
has finished its bootstrapping phase.
This probably means that you are on Windows and you have
forgotten to use the proper idiom in the main module:
if __name__ == '__main__':
freezeSupport()
...
The "freezeSupport()" line can be omitted if the program
is not going to be frozen to produce a Windows executable.'''
    # the child runs main() from this module; '--processing-fork' flags it
    prog = 'from processing._nonforking import main; main()'
    if getattr(sys, 'frozen', False):
        # frozen executable already embeds the bootstrap code
        return [sys.executable, '--processing-fork']
    elif sys.executable.lower().endswith('pythonservice.exe'):
        # running as a Windows service: locate the plain python.exe
        exe = os.path.join(os.path.dirname(os.__file__), '..', 'python.exe')
        return [exe, '-c', prog, '--processing-fork']
    else:
        return [sys.executable, '-c', prog, '--processing-fork']
def get_preparation_data(name, new_console):
    '''
    Collect info about the parent process to be passed to the child
    '''
    main_path = None
    if sys.argv[0] not in ('', '-c') and not WINEXE:
        main_path = getattr(sys.modules['__main__'], '__file__', None)
        # resolve a relative __main__ path against the original cwd
        if main_path and not os.path.isabs(main_path):
            main_path = os.path.join(_dir, main_path)
    return [name, main_path, sys.path, sys.argv, os.getcwd(), new_console]
def prepare(name, main_path, sys_path, sys_argv, curdir, new_console):
    '''
    Try to get parent __main__ module and record it as sys.module['__main__']
    '''
    processing.currentProcess().setName(name)
    if curdir is not None:
        try:
            os.chdir(curdir)
        except OSError:
            # best effort: a vanished directory should not kill the child
            print >>sys.stderr, '*** could not change to directory %r' % curdir
            # raise
    if sys_path is not None:
        sys.path = sys_path
    if new_console:
        # child got its own console: translate Ctrl-Break into ProcessExit
        from processing import ProcessExit
        from processing._processing import SetConsoleCtrlHandler, NULL
        def _STOP_PROCESS_handler(signum, frame):
            raise ProcessExit
        SetConsoleCtrlHandler(NULL, False)         # don't ignore Ctrl-C
        signal.signal(signal.SIGBREAK, _STOP_PROCESS_handler)
    if main_path is not None:
        # re-import the parent's __main__ so unpickled objects can find
        # their globals in this process too
        main_name = splitext(basename(main_path))[0]
        if main_name == '__init__':
            main_name = basename(dirname(main_path))
        if not main_path.lower().endswith('.exe') and main_name != 'ipython':
            if main_path is None:
                # NOTE(review): unreachable -- guarded by `main_path is not
                # None` above; looks copied from a variant. Confirm upstream.
                dirs = None
            elif equal(basename(main_path), '__init__.py'):
                dirs = [dirname(dirname(main_path))]
            else:
                dirs = [dirname(main_path)]
            file, pathname, etc = imp.find_module(main_name, dirs)
            try:
                main_module = imp.load_module(main_name, file, pathname, etc)
            finally:
                if file:
                    file.close()
            # keep the original __main__ reachable, then swap in the import
            sys.modules['__true_main__'] = sys.modules['__main__']
            sys.modules['__main__'] = main_module
    if sys_argv is not None: # this should come last
        sys.argv = sys_argv
def main():
    '''
    Run the process object whose pickled state arrives on stdin
    '''
    assert is_forking(sys.argv)
    # two hex-encoded pickles arrive, one per line:
    # the preparation info, then the Process object itself
    prep_payload = sys.stdin.readline().rstrip().decode('hex')
    proc_payload = sys.stdin.readline().rstrip().decode('hex')
    # fix up this child to resemble the parent before unpickling
    prepare(*cPickle.loads(prep_payload))
    processing.currentProcess()._unpickling = True
    process_obj = cPickle.loads(proc_payload)
    processing.currentProcess()._unpickling = False
    del prep_payload, proc_payload
    # run the code of the process object
    process_obj._bootstrap()
#
# Analogue of `processing.manager`
#
# processing/dummy/managers.py
#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'CreatorMethod' ]
import threading, array
from threading import BoundedSemaphore, Condition, Event, RLock, Semaphore
from Queue import Queue
def Lock():
    '''Return a plain (non-reentrant) thread lock.'''
    lock = threading.Lock()
    return lock
class Namespace(object):
    '''Attribute bag; repr lists public attributes in sorted order.'''
    def __repr__(self):
        shown = sorted('%s=%r' % (key, val)
                       for key, val in self.__dict__.items()
                       if not key.startswith('_'))
        return 'Namespace(%s)' % ', '.join(shown)
class SharedValue(object):
    '''In-process stand-in for a shared value: plain attribute storage.'''
    def __init__(self, format, value):
        self._format = format
        self._value = value
    def __repr__(self):
        cls_name = type(self).__name__
        return '<%r(%r, %r)>' % (cls_name, self._format, self._value)
    def get(self):
        return self._value
    def set(self, value):
        self._value = value
    value = property(get, set)
class SharedStruct(SharedValue):
    # Same trivial behavior as SharedValue; exists so this dummy module
    # mirrors the real manager's SharedValue/SharedStruct distinction.
    pass
class BaseManager(object):
    '''Do-nothing manager mirroring the real BaseManager API.'''
    def __init__(self, *args, **kwds):
        pass
    def start(self):
        '''Nothing to launch.'''
    def shutdown(self):
        '''Nothing to stop.'''
    def join(self):
        '''Nothing to wait for.'''
    def _debug_info(self):
        '''No state to report.'''
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        pass
def CreatorMethod(callable, proxytype=None, exposed=None, typeid=None):
    '''Return an unbound method that forwards to *callable*, ignoring self.

    proxytype/exposed/typeid are accepted for API compatibility only.
    '''
    def _forward(self, *args, **kwds):
        return callable(*args, **kwds)
    try:
        _forward.__name__ = callable.__name__
    except TypeError:
        pass   # keep the generic name when __name__ can't be assigned
    return _forward
class SyncManager(BaseManager):
    # Each attribute is a factory "method": CreatorMethod wraps the plain
    # threading/Queue/array constructor so calls look like proxy creation.
    Event = CreatorMethod(Event)
    Queue = CreatorMethod(Queue)
    Lock = CreatorMethod(Lock)
    RLock = CreatorMethod(RLock)
    Semaphore = CreatorMethod(Semaphore)
    BoundedSemaphore = CreatorMethod(BoundedSemaphore)
    Condition = CreatorMethod(Condition)
    Namespace = CreatorMethod(Namespace)
    SharedValue = CreatorMethod(SharedValue)
    SharedStruct = CreatorMethod(SharedStruct)
    SharedArray = CreatorMethod(array.array)
    dict = CreatorMethod(dict)
    list = CreatorMethod(list)
class BaseProxy(object):
    # Placeholder so code importing BaseProxy from the real managers
    # module keeps working against this dummy version.
    pass
#
# Module providing the `LocalManager` class for dealing
# with shared objects in shared memory
#
# processing/localmanager.py
#
# Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
#
import mmap
import re
import os
import sys
import tempfile
from processing import synchronize, process, queue, heap, _processing
from struct import pack as _pack, unpack as _unpack, calcsize as _calcsize
from array import array
__all__ = [ 'LocalManager' ]
#
#
#
try:
from struct import Struct
from functools import partial
_struct_cache = {}
except ImportError:
Struct = None
#
# Class for a struct which lives in shared memory
#
class SharedStruct(object):
    """A struct (per `struct.calcsize` format) living in shared memory."""
    def __init__(self, format, value, lock):
        wrapper = heap.BufferWrapper(_calcsize(format))
        self.__setstate__((wrapper, format, lock))
        self.set(value)
    def __getstate__(self):
        # only picklable on Windows, where the buffer can be inherited
        assert sys.platform == 'win32'
        return self._wrapper, self._format, self._lock
    def __setstate__(self, state):
        self._wrapper, self._format, self._lock = state
        self._buffer = self._wrapper.getview()
        self._acquire = self._lock.acquire
        self._release = self._lock.release
        if Struct:
            # fast path: compiled Struct objects replace the class-level
            # _get/_set fallbacks via instance attributes
            try:
                s = _struct_cache[self._format]
            except KeyError:
                s = Struct(self._format)
                # NOTE(review): s is never stored into _struct_cache, so
                # the cache never fills -- looks like a bug; confirm.
            self._get = partial(s.unpack_from, self._buffer)
            self._set = partial(s.pack_into, self._buffer, 0)
    def _get(self):
        # fallback used when `Struct` is unavailable (see module top)
        return _unpack(self._format, self._buffer[:])
    def _set(self, *args):
        self._buffer[:] = _pack(self._format, *args)
    def get(self):
        """Return the tuple of struct fields, under the lock."""
        self._acquire()
        try:
            return self._get()
        finally:
            self._release()
    def set(self, value):
        """Set all struct fields from sequence *value*, under the lock."""
        self._acquire()
        try:
            self._set(*value)
        finally:
            self._release()
    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self._format, self.get())
    value = property(get, set)
#
# Class for a length 1 struct which lives in shared memory
#
class SharedValue(SharedStruct):
    '''A SharedStruct holding exactly one field; get/set use the scalar.'''
    def get(self):
        '''Return the single stored value, under the lock.'''
        self._acquire()
        try:
            fields = self._get()
            return fields[0]
        finally:
            self._release()
    def set(self, value):
        '''Store *value* as the single field, under the lock.'''
        self._acquire()
        try:
            self._set(value)
        finally:
            self._release()
    value = property(get, set)
#
# Class for a shared array which lives in shared memory
#
class SharedArray(object):
    """A fixed-length homogeneous array living in shared memory."""
    def __init__(self, format, sequence, lock):
        wrapper = heap.BufferWrapper(_calcsize(format) * len(sequence))
        self.__setstate__((wrapper, format, lock))
        self[:] = sequence
    def __getstate__(self):
        # only picklable on Windows, where the buffer can be inherited
        assert sys.platform == 'win32'
        return self._wrapper, self._format, self._lock
    def __setstate__(self, state):
        self._wrapper, self._format, self._lock = state
        self._buffer = self._wrapper.getview()
        self._acquire = self._lock.acquire
        self._release = self._lock.release
        self._itemsize = _calcsize(self._format)
        # buffer length must be an exact multiple of the item size
        self._length, rem = divmod(len(self._buffer), self._itemsize)
        assert rem == 0
    def __len__(self):
        return self._length
    def __getitem__(self, i):
        self._acquire()
        try:
            a = i * self._itemsize
            b = a + self._itemsize
            return _unpack(self._format, self._buffer[a:b])[0]
        finally:
            self._release()
    def __setitem__(self, i, value):
        self._acquire()
        try:
            a = i * self._itemsize
            b = a + self._itemsize
            self._buffer[a:b] = _pack(self._format, value)
        finally:
            self._release()
    def __getslice__(self, a, b):
        # old-style slice protocol (Python 2)
        self._acquire()
        try:
            at = self._itemsize * a
            bt = self._itemsize * b
            return array(self._format, self._buffer[at:bt])
        finally:
            self._release()
    def __setslice__(self, a, b, seq):
        self._acquire()
        try:
            at = self._itemsize * a
            bt = self._itemsize * b
            self._buffer[at:bt] = array(self._format, seq).tostring()
        finally:
            self._release()
    def tostring(self):
        """Return the raw bytes of the whole buffer, under the lock."""
        self._acquire()
        try:
            return self._buffer[:]
        finally:
            self._release()
    def tolist(self):
        """Return the items as a plain list, under the lock."""
        self._acquire()
        try:
            arr = array(self._format, self._buffer[:])
        finally:
            self._release()
        return list(arr)
    def __repr__(self):
        if self._format == 'c':
            return '%s(%r, %r)' % \
                   (type(self).__name__, self._format, self.tostring())
        else:
            return '%s(%r, %r)' % \
                   (type(self).__name__, self._format, self.tolist())
#
# LocalManager
#
class LocalManager(object):
    """Manager creating shared-memory objects guarded by a single RLock."""
    def __init__(self):
        # one recursive lock guards every shared object from this manager
        self._lock = synchronize.RLock()
    def start(self):
        pass
    def shutdown(self):
        pass
    def join(self):
        pass
    def _debug_info(self):
        pass
    def __enter__(self):
        return self
    def __exit__(self, *args):
        pass
    # synchronization primitives are the real process-aware ones
    Event = synchronize.Event
    Queue = queue.Queue
    Lock = synchronize.Lock
    RLock = synchronize.RLock
    Semaphore = synchronize.Semaphore
    BoundedSemaphore = synchronize.BoundedSemaphore
    Condition = synchronize.Condition
    def SharedValue(self, format, value):
        # the call resolves to the module-level SharedValue class; the
        # method name only shadows it at class scope
        return SharedValue(format, value, self._lock)
    def SharedStruct(self, format, value):
        return SharedStruct(format, value, self._lock)
    def SharedArray(self, format, sequence):
        return SharedArray(format, sequence, self._lock)
#
# Test
#
def _test(x, y, z):
x.value = 42
y.value = (1729, 3.1415927)
for i in range(len(z)):
z[i] *= 2
def test():
    '''Exercise each shared type from a child process.'''
    from processing import Process
    m = LocalManager()
    x = m.SharedValue('i', 0)
    y = m.SharedStruct('id', (0, 0))
    z = m.SharedArray('d', range(10))
    p = Process(target=_test, args=(x, y, z))
    p.start()
    p.join()
    # mutations made in the child are visible here via shared memory
    print x
    print y
    print z

if __name__ == '__main__':
    test()
/*
* Definitions used by `connection.h`
*
* pipes_def.h
*
* Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
*/
#ifndef _PIPE_CONNECTION_H
#define _PIPE_CONNECTION_H
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#define TOO_LONG(n) (n > 0x7fffffff)
#define INVALID_HANDLE NULL
typedef HANDLE _HANDLE;
/*
 * Return a same-access duplicate of `h`, or INVALID_HANDLE on failure
 */
static _HANDLE
_duplicate(_HANDLE h)
{
    HANDLE dup_h;
    BOOL success = DuplicateHandle(
        GetCurrentProcess(), (HANDLE)h, GetCurrentProcess(),
        &dup_h, 0, FALSE, DUPLICATE_SAME_ACCESS
        );
    return success ? (_HANDLE)dup_h : INVALID_HANDLE;
}
#define _close(h) CloseHandle(h)
/*
* Error values
*/
#define SUCCESS (0)
#define STANDARD_ERROR (-1)
#define MEMORY_ERROR (-2)
#define ALREADY_SET_ERROR (-3)
#define BAD_MESSAGE_LENGTH (-7)
/*
 * Map one of the negative error codes above to a Python exception.
 * Always returns NULL so callers can `return SetExcFromNumber(num);`.
 */
static PyObject *
SetExcFromNumber(int num)
{
    switch (num) {
    case STANDARD_ERROR:
        /* builds the OS error from GetLastError() */
        PyErr_SetFromWindowsErr(0);
        break;
    case MEMORY_ERROR:
        PyErr_NoMemory();
        break;
    case ALREADY_SET_ERROR:
        /* exception already set by a deeper call; leave it alone */
        break;
    case BAD_MESSAGE_LENGTH:
        PyErr_SetString(PyExc_IOError, "bad message length");
        break;
    default:
        PyErr_SetString(PyExc_AssertionError, "unknown error number");
    }
    return NULL;
}
/*
* Send string to the pipe; assumes in message oriented mode
*/
/*
 * Send `string` of `length` bytes down the pipe as one message
 * (assumes message-oriented mode).  Returns SUCCESS,
 * BAD_MESSAGE_LENGTH or STANDARD_ERROR.
 */
static int
send_string(_HANDLE pipe, char *string, size_t length)
{
    /* bug fix: WriteFile's lpNumberOfBytesWritten is LPDWORD; passing a
       size_t* is a type mismatch (and wrong size on 64-bit Windows) */
    DWORD amount_written;

    if (TOO_LONG(length))
        return BAD_MESSAGE_LENGTH;
    if (!WriteFile(pipe, string, (DWORD)length, &amount_written, NULL))
        return STANDARD_ERROR;
    /* assert(length == amount_written); */
    return SUCCESS;
}
/*
* Attempts to read into buffer, or if it is too small into *newbuffer.
*
* Returns number of bytes read. Assumes in message oriented mode.
*/
static Py_ssize_t
recv_string(_HANDLE pipe, char *buffer, size_t buflength, char **newbuffer)
{
    DWORD left, length, full_length;

    *newbuffer = NULL;
    /* fast path: the whole message fits in the caller's buffer */
    if (ReadFile(pipe, buffer, buflength, &length, NULL)) {
        if (TOO_LONG(length))
            return BAD_MESSAGE_LENGTH;
        return length;
    } else if (GetLastError() != ERROR_MORE_DATA) {
        return STANDARD_ERROR;
    }
    /* ERROR_MORE_DATA: find out how much of the message remains */
    if (!PeekNamedPipe(pipe, NULL, 0, NULL, NULL, &left))
        return STANDARD_ERROR;
    full_length = length + left;
    if (TOO_LONG(full_length))
        return BAD_MESSAGE_LENGTH;
    /* caller owns *newbuffer on success; freed here on failure */
    *newbuffer = malloc(full_length);
    if (*newbuffer == NULL)
        return MEMORY_ERROR;
    memcpy(*newbuffer, buffer, length);
    if (ReadFile(pipe, *newbuffer+length, left, &length, NULL)) {
        return full_length;
    } else {
        free(*newbuffer);
        return STANDARD_ERROR;
    }
}
/*
* Check whether any data is available for reading
*/
/*
 * Return TRUE if data is readable within `timeout` seconds, FALSE on
 * timeout, STANDARD_ERROR on failure.  timeout <= 0 polls once.
 */
static int
poll(_HANDLE h, double timeout)
{
    DWORD bytes, deadline;

    if (timeout <= 0.0) {
        if (PeekNamedPipe(h, NULL, 0, NULL, &bytes, NULL))
            return bytes > 0;
        else
            return STANDARD_ERROR;
    }
    /* sleep in 5ms steps until data arrives or the deadline passes */
    deadline = GetTickCount() + (DWORD)(1000 * timeout);
    Sleep(0);
    for ( ; ; ) {
        if (!PeekNamedPipe(h, NULL, 0, NULL, &bytes, NULL))
            return STANDARD_ERROR;
        else if (bytes > 0)
            return TRUE;
        else if (deadline <= GetTickCount())
            return FALSE;
        Sleep(5);
    }
}
#endif /* _PIPE_CONNECTION_H */
/*
* Windows version of extension module used by `processing` package
*
* win_processing.c
*
* Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
*/
#include "processing_defs.h"
#include "processing.h"
extern PyTypeObject BlockerType;
extern PyTypeObject SocketConnectionType;
extern PyTypeObject QueueType;
PyObject *dumpsFunction, *loadsFunction, *Empty, *Full, *BufferTooShort;
/*
 * Fill *deadline with the absolute time `timeout` seconds from now.
 * Returns 0 on success, -1 if gettimeofday() fails.
 */
int
calc_deadline(double timeout, struct timespec *deadline)
{
    struct timeval now;
    long sec, nsec;

    if (gettimeofday(&now, NULL) < 0)
        return -1;
    /* split timeout into whole seconds and leftover nanoseconds */
    sec = (long) trunc(timeout);
    nsec = (long) (1e9 * (timeout - sec));
    deadline->tv_sec = now.tv_sec + sec;
    deadline->tv_nsec = now.tv_usec * 1000 + nsec;
    /* normalize: carry overflowed nanoseconds into the seconds field */
    deadline->tv_sec += (deadline->tv_nsec / 1000000000);
    deadline->tv_nsec %= 1000000000;
    return 0;
}
#if !NO_SENDFD
/*
* Functions for transferring file descriptors between processes.
* Reimplements some of the functionality of the `fdcred`
* module at `http://www.mca-ltd.com/resources/fdcred_1.tgz`.
*/
#include <sys/socket.h>
/*
 * Send file descriptor `fd` over the connected unix domain socket `conn`.
 * Returns the sendmsg() result (< 0 on failure).
 */
static int
sendfd(int conn, int fd)
{
    char dummy_char;
    char buf[CMSG_SPACE(sizeof(int))];
    struct msghdr msg = {0};
    struct iovec dummy_iov;
    struct cmsghdr *cmsg;

    /* at least one byte of real data must accompany the control message */
    dummy_iov.iov_base = &dummy_char;
    dummy_iov.iov_len = 1;
    msg.msg_control = buf;
    msg.msg_controllen = sizeof(buf);
    msg.msg_iov = &dummy_iov;
    msg.msg_iovlen = 1;
    /* SCM_RIGHTS control message carrying a single descriptor */
    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    msg.msg_controllen = cmsg->cmsg_len;
    *(int*)CMSG_DATA(cmsg) = fd;
    return sendmsg(conn, &msg, 0);
}
/*
 * Receive a file descriptor over the unix domain socket `conn`.
 * On success stores the descriptor in *fd and returns the (non-negative)
 * recvmsg() result; on failure returns the negative result and leaves
 * *fd untouched.
 */
static int
recvfd(int conn, int *fd)
{
    int result;
    char dummy_char;
    char buf[CMSG_SPACE(sizeof(int))];
    struct msghdr msg = {0};
    struct iovec dummy_iov;
    struct cmsghdr *cmsg;

    /* one dummy data byte accompanies the ancillary control message */
    dummy_iov.iov_base = &dummy_char;
    dummy_iov.iov_len = 1;
    msg.msg_control = buf;
    msg.msg_controllen = sizeof(buf);
    msg.msg_iov = &dummy_iov;
    msg.msg_iovlen = 1;
    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    msg.msg_controllen = cmsg->cmsg_len;

    result = recvmsg(conn, &msg, 0);
    /* bug fix: only read the control payload after a successful receive;
       previously *fd was filled from an uninitialized buffer on failure */
    if (result >= 0)
        *fd = *(int*)CMSG_DATA(cmsg);
    return result;
}
/*
 * sendfd(sockfd, fd) -> None; raises OSError on failure
 */
static PyObject *
processing_sendfd(PyObject *self, PyObject *args)
{
    int conn, fd;

    if (!PyArg_ParseTuple(args, "ii", &conn, &fd))
        return NULL;
    if (sendfd(conn, fd) < 0)
        return PyErr_SetFromErrno(PyExc_OSError);
    Py_RETURN_NONE;
}
/*
 * recvfd(sockfd) -> fd; raises OSError on failure
 */
static PyObject *
processing_recvfd(PyObject *self, PyObject *args)
{
    int conn, fd;

    if (!PyArg_ParseTuple(args, "i", &conn))
        return NULL;
    if (recvfd(conn, &fd) < 0)
        return PyErr_SetFromErrno(PyExc_OSError);
    return Py_BuildValue("i", fd);
}
#endif /* !NO_SENDFD */
/*
 * Module-level method table.  Bug fix: "decriptor" -> "descriptor" in
 * the user-visible docstrings.
 */
static PyMethodDef module_methods[] = {
#if !NO_SENDFD
    {"sendfd", processing_sendfd, METH_VARARGS,
     "sendfd(sockfd, fd) -> None: send file descriptor given by fd over\n"
     "the unix domain socket whose file descriptor is sockfd"},
    {"recvfd", processing_recvfd, METH_VARARGS,
     "recvfd(sockfd) -> fd: returns a file descriptor over\n"
     "a unix domain socket whose file descriptor is sockfd"},
#endif
    {"rwbuffer", processing_rwbuffer, METH_VARARGS,
     "rwbuffer(object [, offset[, size]]) -> read-write buffer"},
    {"address_of_buffer", processing_address_of_buffer, METH_O,
     "address_of_buffer(obj) -> (address, size)"},
    {NULL, NULL, 0, NULL}   /* sentinel */
};
/*
 * Module initialization (posix build): create `_processing`, cache the
 * cPickle entry points, register the BufferTooShort exception and the
 * Connection/Blocker/Queue types.
 *
 * Fixes relative to the original: the exception's qualified name read
 * "_processing.BufferToShort" (typo) while being exposed as attribute
 * "BufferTooShort"; several results were used without NULL checks.
 */
PyMODINIT_FUNC
init_processing(void)
{
    PyObject *m, *other_module;
    /*
     * Initialize module
     */
    m = Py_InitModule("_processing", module_methods);
    if (m == NULL)
        return;
    /*
     * Get copies of `cPickle.dumps` and `cPickle.loads`
     */
    other_module = PyImport_ImportModule("cPickle");
    if (!other_module)
        return;
    dumpsFunction = PyObject_GetAttrString(other_module, "dumps");
    loadsFunction = PyObject_GetAttrString(other_module, "loads");
    Py_DECREF(other_module);
    if (!dumpsFunction || !loadsFunction)
        return;
    /*
     * Add exception to module
     */
    BufferTooShort = PyErr_NewException("_processing.BufferTooShort",
                                        NULL, NULL);
    if (!BufferTooShort)
        return;
    Py_INCREF(BufferTooShort);
    PyModule_AddObject(m, "BufferTooShort", BufferTooShort);
    /*
     * Add type objects to module
     */
    if (PyType_Ready(&SocketConnectionType) < 0)
        return;
    Py_INCREF(&SocketConnectionType);
    PyModule_AddObject(m, "Connection", (PyObject*)&SocketConnectionType);
#if USE_POSIX_SEMAPHORE
    if (PyType_Ready(&BlockerType) < 0)
        return;
    Py_INCREF(&BlockerType);
    PyModule_AddObject(m, "Blocker", (PyObject*)&BlockerType);
#endif
#if USE_POSIX_QUEUE
    /* cache Queue.Empty/Queue.Full for the C queue implementation;
       the original did not check this import for failure */
    other_module = PyImport_ImportModule("Queue");
    if (!other_module)
        return;
    Empty = PyObject_GetAttrString(other_module, "Empty");
    Full = PyObject_GetAttrString(other_module, "Full");
    Py_DECREF(other_module);
    if (!Empty || !Full)
        return;
    if (PyType_Ready(&QueueType) < 0)
        return;
    Py_INCREF(&QueueType);
    PyModule_AddObject(m, "Queue", (PyObject*)&QueueType);
#endif
}
/*
* A type which wraps a posix named semaphore
*
* posix_processing.c
*
* Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
*/
#include "processing_defs.h"
#include "pythread.h"
#include <semaphore.h>
#include <fcntl.h>
#include <time.h>
#include <math.h>
#if NO_SEM_UNLINK
int sem_unlink(char *name) { return 0; }
#endif
extern int calc_deadline(double timeout, struct timespec *deadline);
enum { MUTEX, RECURSIVE_MUTEX, SEMAPHORE, BOUNDED_SEMAPHORE };
#define IS_MUTEX(self) ((self)->kind < SEMAPHORE)
/*
 * Per-instance state for a Blocker backed by a posix named semaphore.
 */
typedef struct {
    PyObject_HEAD
    char *name;          /* malloc'ed copy of the semaphore's name */
    sem_t *handle;       /* sem_open() handle, or NULL once closed */
    int maxvalue;        /* cap for BOUNDED_SEMAPHORE, else -1 */
    int kind;            /* MUTEX, RECURSIVE_MUTEX, SEMAPHORE, ... */
    int count;           /* acquires minus releases in this process */
    pid_t last_pid;      /* process that last acquired it */
    long last_tid;       /* thread that last acquired it */
} Blocker;
PyTypeObject BlockerType;
/*
 * Return 1 iff the calling thread currently owns the mutex.
 * As a side effect, reset the recursion count when the stored pid is
 * not ours (a forked child inherits the struct but not the ownership).
 */
static int
ismine(Blocker *self)
{
    if (self->last_pid != getpid()) {
        if (self->count != 0)
            self->count = 0;     /* correction after fork */
        return 0;
    }
    return self->count > 0 && self->last_tid == PyThread_get_thread_ident();
}
/*
 * tp_new: allocate a Blocker with its resources zeroed so that
 * sync_dealloc() is safe even if sync_init() is never reached.
 */
static PyObject *
sync_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    Blocker *obj = (Blocker*)type->tp_alloc(type, 0);

    if (obj == NULL)
        return NULL;
    obj->name = NULL;
    obj->handle = NULL;
    return (PyObject*)obj;
}
/*
 * __init__(name, create, kind, value): open (or exclusively create)
 * the named posix semaphore backing this Blocker.
 */
static int
sync_init(Blocker *self, PyObject *args, PyObject *kwds)
{
    static char *kwlist[] = {"name", "create", "kind", "value", NULL};
    char *name;
    int create, kind, value = -1;
    size_t size;

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "siii", kwlist,
                                     &name, &create, &kind, &value))
        return -1;

    /* keep a private copy of the name; freed in sync_dealloc() */
    size = strlen(name) + 1;
    self->name = malloc(size);
    if (self->name == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    memcpy(self->name, name, size);

    self->kind = kind;
    self->maxvalue = (kind == BOUNDED_SEMAPHORE) ? value : -1;
    self->count = 0;
    self->last_pid = 0;
    self->last_tid = 0;

    /* O_EXCL makes creation fail if the name is already in use */
    self->handle = create ? sem_open(name, O_CREAT | O_EXCL, 0600, value)
                          : sem_open(name, 0);
    if (self->handle != SEM_FAILED)
        return 0;
    PyErr_SetFromErrno(PyExc_OSError);
    return -1;
}
/*
 * _close(): give back any mutex this process still holds, then close
 * the semaphore handle.  Idempotent -- a second call is a no-op.
 */
static PyObject *
sync_close(Blocker *self)
{
    sem_t *handle = self->handle;

    if (handle != NULL) {
        /* an owned mutex must be posted back before the handle goes away */
        if (IS_MUTEX(self) && self->last_pid == getpid() && self->count > 0) {
            self->count = 0;
            sem_post(handle);
        }
        self->handle = NULL;
        if (sem_close(handle) < 0)
            return PyErr_SetFromErrno(PyExc_OSError);
    }
    Py_RETURN_NONE;
}
/*
 * _unlink(): remove the semaphore's name from the system namespace.
 * Open handles keep working until they are closed.
 */
static PyObject *
sync_unlink(Blocker *self)
{
    if (sem_unlink(self->name) < 0)
        return PyErr_SetFromErrno(PyExc_OSError);
    Py_RETURN_NONE;
}
/*
 * tp_dealloc: close the semaphore handle and free the name copy.
 *
 * An exception must never escape a deallocator, but sync_close() can
 * set one (the original simply discarded its return value, leaving the
 * error pending).  Preserve any exception already in flight and drop
 * whatever sync_close() raises.
 */
static void
sync_dealloc(Blocker* self)
{
    PyObject *error_type, *error_value, *error_traceback;

    PyErr_Fetch(&error_type, &error_value, &error_traceback);
    Py_XDECREF(sync_close(self));
    PyErr_Clear();                    /* discard any error from sync_close */
    PyErr_Restore(error_type, error_value, error_traceback);

    free(self->name);
    self->name = NULL;
    self->ob_type->tp_free((PyObject*)self);
}
/*
 * acquire([blocking=1]) -> bool
 *
 * Acquire the semaphore/mutex.  Returns True on success, or False when
 * `blocking` is false and the semaphore could not be taken.  A MUTEX
 * may not be re-acquired by its owner; a RECURSIVE_MUTEX just bumps
 * its recursion count.  sem_wait() is retried after EINTR unless a
 * python signal handler raised an exception.
 */
static PyObject *
sync_acquire(Blocker *self, PyObject *args)
{
    int blocking = 1, mine, res;
    if (!PyArg_ParseTuple(args, "|i", &blocking))
        return NULL;
    if (self->handle == NULL) {
        PyErr_SetString(PyExc_AssertionError, "semaphore is closed");
        return NULL;
    }
    mine = ismine(self);
    if (self->kind == MUTEX && mine) {
        PyErr_SetString(PyExc_AssertionError, "attempt to acquire a "
                        "non-recusive lock already owned by thread");
        return NULL;
    }
    if (self->kind == RECURSIVE_MUTEX && mine) {
        /* already owned by this thread: just count the re-entry */
        ++self->count;
        Py_RETURN_TRUE;
    }
    do {
        Py_BEGIN_ALLOW_THREADS
        if (blocking)
            res = sem_wait(self->handle);
        else
            res = sem_trywait(self->handle);
        Py_END_ALLOW_THREADS
        /* retry when interrupted by a signal whose handler did not raise */
    } while (res < 0 && errno == EINTR && !PyErr_CheckSignals());
    if (res < 0) {
        if (errno == EAGAIN)           /* sem_trywait: already locked */
            Py_RETURN_FALSE;
        else if (errno == EINTR)       /* signal handler raised */
            return NULL;
        else
            return PyErr_SetFromErrno(PyExc_OSError);
    }
    /* record ownership for ismine()/release() checks */
    ++self->count;
    self->last_pid = getpid();
    self->last_tid = PyThread_get_thread_ident();
    Py_RETURN_TRUE;
}
/*
 * release() -> None
 *
 * Release the semaphore/mutex.  Mutexes may only be released by the
 * owning thread; a recursive mutex decrements its recursion count and
 * only posts the semaphore on the outermost release.  A bounded
 * semaphore refuses to exceed its creation-time maximum.
 */
static PyObject *
sync_release(Blocker *self)
{
    int sval, res, mine;
    if (self->handle == NULL) {
        PyErr_SetString(PyExc_AssertionError, "semaphore is closed");
        return NULL;
    }
    mine = ismine(self);
    if (IS_MUTEX(self)) {
        if (!mine) {
            PyErr_SetString(PyExc_AssertionError, "attempt to release a "
                            "lock which is not owned by thread");
            return NULL;
        }
        if (self->count > 1) { /* a recursively acquired mutex */
            --self->count;
            Py_RETURN_NONE;
        }
    } else if (self->kind == BOUNDED_SEMAPHORE) {
        /* emulate the bound: posix semaphores have no built-in maximum */
        if (sem_getvalue(self->handle, &sval) < 0)
            return PyErr_SetFromErrno(PyExc_OSError);
        if (sval >= self->maxvalue) {
            PyErr_SetString(PyExc_ValueError,
                            "Semaphore released too many times");
            return NULL;
        }
    }
    res = sem_post(self->handle);
    if (res < 0)
        return PyErr_SetFromErrno(PyExc_OSError);
    --self->count;
    Py_RETURN_NONE;
}
/*
 * acquire_timeout(timeout) -> bool
 *
 * Like acquire(True) but gives up after `timeout` seconds, returning
 * False.  On platforms without sem_timedwait() (NO_SEM_TIMED) the wait
 * is emulated by polling sem_trywait() with a linearly growing sleep
 * capped at 20 ms.  Recursive-mutex re-entry succeeds immediately.
 */
static PyObject *
sync_acquire_timeout(Blocker *self, PyObject *args)
{
    double timeout = 0.0;
    int res, mine;
    if (!PyArg_ParseTuple(args, "d", &timeout))
        return NULL;
    if (self->handle == NULL) {
        PyErr_SetString(PyExc_AssertionError, "semaphore is closed");
        return NULL;
    }
    mine = ismine(self);
    if (self->kind == MUTEX && mine) {
        PyErr_SetString(PyExc_AssertionError, "attempt to acquire a "
                        "non-recusive lock owned by thread");
        return NULL;
    }
    if (self->kind == RECURSIVE_MUTEX && mine) {
        /* already owned by this thread: just count the re-entry */
        ++self->count;
        Py_RETURN_TRUE;
    }
    if (timeout < 0.0)
        timeout = 0.0;
#if NO_SEM_TIMED
    /* no sem_timedwait(): poll sem_trywait() until the deadline passes */
    res = sem_trywait(self->handle);
    if (res < 0 && errno == EAGAIN) {
        struct timeval now, deadline;
        unsigned long delay, difference;
        /* get current time */
        if (gettimeofday(&now, NULL) < 0)
            return NULL;
        /* calculate when we should be prepared to wait until */
        difference = (unsigned long)(timeout * 1000000);
        deadline.tv_sec = now.tv_sec + difference / 1000000;
        deadline.tv_usec = now.tv_usec + difference % 1000000;
        if (deadline.tv_usec >= 1000000) {
            deadline.tv_sec++;
            deadline.tv_usec %= 1000000;
        }
        for (delay = 0 ; ; delay += 1000) {
            /* poll */
            res = sem_trywait(self->handle);
            if (res < 0 && errno != EAGAIN)
                break;
            /* get current time */
            if (gettimeofday(&now, NULL) < 0)
                return NULL;
            /* check for timeout */
            if (deadline.tv_sec < now.tv_sec ||
                (deadline.tv_sec == now.tv_sec &&
                 deadline.tv_usec <= now.tv_usec))
                break;
            /* calculate how much time is left */
            difference = (deadline.tv_sec - now.tv_sec)*1000000 +
                (deadline.tv_usec - now.tv_usec);
            /* check delay not too long -- maximum is 20 msecs */
            if (delay > 20000)
                delay = 20000;
            if (delay > difference)
                delay = difference;
            /* sleep */
            usleep(delay);
            if (PyErr_CheckSignals())
                break;
        }
    }
#else
    {
        struct timespec deadline;
        if (calc_deadline(timeout, &deadline) < 0)
            return PyErr_SetFromErrno(PyExc_OSError);
        do {
            Py_BEGIN_ALLOW_THREADS
            res = sem_timedwait(self->handle, &deadline);
            Py_END_ALLOW_THREADS
            /* retry after a signal whose handler did not raise */
        } while (res < 0 && errno == EINTR && !PyErr_CheckSignals());
    }
#endif /* NO_SEM_TIMED */
    if (res < 0) {
        if (errno == EAGAIN || errno == ETIMEDOUT)
            Py_RETURN_FALSE;
        else if (errno == EINTR)       /* signal handler raised */
            return NULL;
        else
            return PyErr_SetFromErrno(PyExc_OSError);
    }
    /* record ownership for ismine()/release() checks */
    ++self->count;
    self->last_pid = getpid();
    self->last_tid = PyThread_get_thread_ident();
    Py_RETURN_TRUE;
}
/*
 * _count() -> int: acquires minus releases performed by this process.
 */
static PyObject *
sync_count(Blocker *self)
{
    if (self->handle == NULL) {
        PyErr_SetString(PyExc_AssertionError, "semaphore is closed");
        return NULL;
    }
    /* a forked child inherits the struct but not the ownership */
    if (self->count > 0 && self->last_pid != getpid())
        self->count = 0; /* correction after fork */
    return Py_BuildValue("i", self->count);
}
/*
 * _getvalue() -> int: the semaphore's current value via sem_getvalue().
 */
static PyObject *
sync_getvalue(Blocker *self)
{
    int current;

    if (self->handle == NULL) {
        PyErr_SetString(PyExc_AssertionError, "semaphore/mutex is closed");
        return NULL;
    }
    return sem_getvalue(self->handle, &current) < 0
        ? PyErr_SetFromErrno(PyExc_OSError)
        : Py_BuildValue("i", current);
}
/*
 * _ismine() -> bool: whether the calling thread owns the mutex.
 * Only meaningful for mutexes; semaphores raise AssertionError.
 */
static PyObject *
sync_ismine(Blocker *self)
{
    if (self->handle == NULL) {
        PyErr_SetString(PyExc_AssertionError, "semaphore/mutex is closed");
        return NULL;
    }
    if (!IS_MUTEX(self)) {
        PyErr_SetString(PyExc_AssertionError, "semaphores cannot be owned");
        return NULL;
    }
    return PyBool_FromLong(ismine(self));
}
/*
 * Method table for the posix Blocker type.
 * The acquire_timeout docstring previously duplicated acquire's; it now
 * matches the windows implementation ("using a timeout").
 */
static PyMethodDef sync_methods[] = {
    {"acquire", (PyCFunction)sync_acquire, METH_VARARGS,
     "acquire the semaphore/mutex"},
    {"release", (PyCFunction)sync_release, METH_NOARGS,
     "release the semaphore/mutex"},
    {"acquire_timeout", (PyCFunction)sync_acquire_timeout, METH_VARARGS,
     "acquire the semaphore/mutex using a timeout"},
    {"_close", (PyCFunction)sync_close, METH_NOARGS,
     "close the semaphore/mutex"},
    {"_unlink", (PyCFunction)sync_unlink, METH_NOARGS,
     "unlink the name of the semaphore/mutex"},
    {"_count", (PyCFunction)sync_count, METH_NOARGS,
     "number of `acquire()`s minus number of `release()`s for this process"},
    {"_ismine", (PyCFunction)sync_ismine, METH_NOARGS,
     "whether the mutex is owned by this thread"},
    {"_getvalue", (PyCFunction)sync_getvalue, METH_NOARGS,
     "get the value of the semaphore"},
    {NULL}                                 /* sentinel */
};
#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
#define PyMODINIT_FUNC void
#endif
/*
 * Type object for `_processing.Blocker` (posix build).  Instances wrap
 * a posix named semaphore; the python level builds Lock/RLock/Semaphore
 * on top of it.  Only dealloc/init/new and the method table are filled
 * in; everything else uses the defaults.
 */
PyTypeObject BlockerType = {
    PyObject_HEAD_INIT(NULL)
    0,                         /* ob_size*/
    "_processing.Blocker",     /* tp_name*/
    sizeof(Blocker),           /* tp_basicsize*/
    0,                         /* tp_itemsize*/
    (destructor)sync_dealloc,
                               /* tp_dealloc*/
    0,                         /* tp_print*/
    0,                         /* tp_getattr*/
    0,                         /* tp_setattr*/
    0,                         /* tp_compare*/
    0,                         /* tp_repr*/
    0,                         /* tp_as_number*/
    0,                         /* tp_as_sequence*/
    0,                         /* tp_as_mapping*/
    0,                         /* tp_hash */
    0,                         /* tp_call*/
    0,                         /* tp_str*/
    0,                         /* tp_getattro*/
    0,                         /* tp_setattro*/
    0,                         /* tp_as_buffer*/
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
                               /* tp_flags*/
    "Semaphore/Mutex type",    /* tp_doc */
    0,                         /* tp_traverse */
    0,                         /* tp_clear */
    0,                         /* tp_richcompare */
    0,                         /* tp_weaklistoffset */
    0,                         /* tp_iter */
    0,                         /* tp_iternext */
    sync_methods,              /* tp_methods */
    0,                         /* tp_members */
    0,                         /* tp_getset */
    0,                         /* tp_base */
    0,                         /* tp_dict */
    0,                         /* tp_descr_get */
    0,                         /* tp_descr_set */
    0,                         /* tp_dictoffset */
    (initproc)sync_init,       /* tp_init */
    0,                         /* tp_alloc */
    (newfunc)sync_new,         /* tp_new */
};
#ifndef PROCESSING_DEFS_H
#define PROCESSING_DEFS_H
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
typedef int Py_ssize_t;
# define PY_SSIZE_T_MAX INT_MAX
# define PY_SSIZE_T_MIN INT_MIN
# define N_FMT "i"
#else
# define N_FMT "n"
#endif
#endif /* PROCESSING_DEFS_H */
/*
* Definitions used by `connection.h`
*
* socket_def.h
*
* Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
*/
#ifndef _SOCKET_CONNECTION_H
#define _SOCKET_CONNECTION_H
#define TOO_LONG(n) (n > 0x7fffffff)
#ifdef MS_WINDOWS
/*
* Windows definitions
*/
#define WIN32_LEAN_AND_MEAN
#include <winsock2.h>
typedef SOCKET _HANDLE;
typedef unsigned uint32_t;
#define INVALID_HANDLE INVALID_SOCKET
#define _write(h, buffer, length) send(h, buffer, length, 0)
#define _read(h, buffer, length) recv(h, buffer, length, 0)
#define _close(h) closesocket(h)
/*
 * Duplicate a handle within the current process, returning the new
 * handle or INVALID_HANDLE on failure (windows analogue of dup()).
 * NOTE(review): callers pass SOCKETs here; DuplicateHandle on a socket
 * is discouraged by winsock documentation (WSADuplicateSocket is the
 * sanctioned API) -- confirm this is only used for same-process dups.
 */
static _HANDLE
_duplicate(_HANDLE h)
{
    HANDLE dup_h;
    BOOL success = DuplicateHandle(
        GetCurrentProcess(), (HANDLE)h, GetCurrentProcess(),
        &dup_h, 0, FALSE, DUPLICATE_SAME_ACCESS
    );
    return success ? (_HANDLE)dup_h : INVALID_HANDLE;
}
#else
/*
* Posix definitions
*/
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>
typedef int BOOL;
typedef unsigned char BYTE;
typedef int _HANDLE;
#define TRUE (1)
#define FALSE (0)
#define INVALID_HANDLE (-1)
#define _write write
#define _read read
#define _close close
#define _duplicate dup
#endif /* MS_WINDOWS */
/*
* Error values
*/
#define SUCCESS (0)
#define STANDARD_ERROR (-1)
#define MEMORY_ERROR (-2)
#define ALREADY_SET_ERROR (-3)
#define SELECT_ERROR (-4)
#define END_OF_FILE (-5)
#define EARLY_END_OF_FILE (-6)
#define BAD_MESSAGE_LENGTH (-7)
#define SOME_SOCKET_ERROR (-8)
static PyObject *
SetExcFromNumber(int num)
{
switch (num) {
case STANDARD_ERROR:
#ifdef MS_WINDOWS
PyErr_SetFromWindowsErr(0);
#else
PyErr_SetFromErrno(PyExc_OSError);
#endif
break;
case MEMORY_ERROR:
PyErr_NoMemory();
break;
case ALREADY_SET_ERROR:
break;
case END_OF_FILE:
PyErr_SetString(PyExc_IOError, "got end of file");
break;
case EARLY_END_OF_FILE:
PyErr_SetString(PyExc_IOError, "got end of file during message");
break;
case BAD_MESSAGE_LENGTH:
PyErr_SetString(PyExc_IOError, "message length is bad");
break;
#ifdef MS_WINDOWS
case SOME_SOCKET_ERROR:
PyErr_SetExcFromWindowsErr(PyExc_IOError, WSAGetLastError()) ;
break;
#endif
default:
PyErr_Format(PyExc_RuntimeError, "unkown error number: %d", num);
}
return NULL;
}
/*
* Send string to file descriptor
*/
/*
 * Write all `length` bytes of `string` to handle `h`, looping over
 * short writes.  Returns SUCCESS or STANDARD_ERROR.
 */
static int
_sendall(_HANDLE h, char *string, size_t length)
{
    char *cursor = string;
    Py_ssize_t written;

    while (length > 0) {
        written = _write(h, cursor, length);
        if (written < 0)
            return STANDARD_ERROR;
        cursor += written;
        length -= written;
    }
    return SUCCESS;
}
/*
* Receive string of exact length from file descriptor
*/
/*
 * Read exactly `length` bytes from `h` into `buffer`.  Returns SUCCESS,
 * the negative read result on error, END_OF_FILE if the stream closed
 * before any byte arrived, or EARLY_END_OF_FILE mid-message.
 */
static int
_recvall(_HANDLE h, char *buffer, size_t length)
{
    char *cursor = buffer;
    size_t todo = length;
    Py_ssize_t got;

    while (todo > 0) {
        got = _read(h, cursor, todo);
        if (got > 0) {
            cursor += got;
            todo -= got;
            continue;
        }
        if (got < 0)
            return got;
        /* got == 0: peer closed the stream */
        return (todo == length) ? END_OF_FILE : EARLY_END_OF_FILE;
    }
    return SUCCESS;
}
/*
* Send a string prepended by the string length in network byte order
*/
/*
 * Send a string prefixed by its length as a 32 bit unsigned int in
 * network byte order.  Short messages are copied into one buffer so
 * header and payload go out in a single _sendall() call; long messages
 * are sent as two writes to avoid the copy.  Returns SUCCESS or a
 * negative error code.
 *
 * Bug fix: the long-message path used `_sendall(...) || _sendall(...)`,
 * which collapses a negative error code into 1 -- callers test
 * `res < 0`, so write failures were silently ignored.
 */
static int
send_string(_HANDLE h, char *string, size_t length)
{
    if (length < 0x4000) {
        /* small message: one malloc'ed header+payload buffer, one write */
        char *message;
        int res;
        message = malloc(length+4);
        if (message == NULL)
            return MEMORY_ERROR;
        *(uint32_t*)message = htonl((uint32_t)length);
        memcpy(message+4, string, length);
        res = _sendall(h, message, length+4);
        free(message);
        return res;
    } else {
        uint32_t lenbuff;
        int res;
        if (TOO_LONG(length))
            return BAD_MESSAGE_LENGTH;
        lenbuff = htonl((uint32_t)length);
        /* send header then payload, propagating the real error code */
        res = _sendall(h, (char*)&lenbuff, 4);
        if (res == SUCCESS)
            res = _sendall(h, string, length);
        return res;
    }
}
/*
* Attempts to read into buffer, or failing that into *newbuffer
*
* Returns number of bytes read.
*/
/*
 * Receive a length-prefixed string into `buffer` if it fits within
 * `buflength` bytes, otherwise into a freshly malloc'ed buffer returned
 * through `*newbuffer` (which the caller must free).
 *
 * Returns the number of payload bytes, or a negative error code.
 * `*newbuffer` is NULL whenever the caller's buffer was used or an
 * error occurred before allocation.
 * NOTE(review): if the second _recvall() fails, *newbuffer is still
 * returned non-NULL -- the caller is presumably expected to free it;
 * confirm against the callers.
 */
static Py_ssize_t
recv_string(_HANDLE h, char *buffer, size_t buflength, char **newbuffer)
{
    int res;
    uint32_t ulength;
    *newbuffer = NULL;
    /* 4-byte big-endian length prefix */
    res = _recvall(h, (char*)&ulength, 4);
    if (res < 0)
        return res;
    ulength = ntohl(ulength);
    if (TOO_LONG(ulength))
        return BAD_MESSAGE_LENGTH;
    if (ulength <= buflength) {
        res = _recvall(h, buffer, (size_t)ulength);
        return res < 0 ? res : ulength;
    } else {
        /* malloc(0) may legitimately return NULL: allocate at least 1 */
        *newbuffer = malloc(ulength > 0 ? (size_t)ulength : 1);
        if (*newbuffer == NULL)
            return MEMORY_ERROR;
        res = _recvall(h, *newbuffer, (size_t)ulength);
        return res < 0 ? (Py_ssize_t)res : (Py_ssize_t)ulength;
    }
}
/*
* Check whether any data is available for reading
*/
/*
 * Wait up to `timeout` seconds for `fd` to become readable via select().
 * Returns TRUE if data is available, FALSE on timeout, or a negative
 * error code.  Negative timeouts are treated as zero (pure poll).
 *
 * NOTE(review): this static helper shadows the POSIX poll(2) name, and
 * select() cannot handle descriptors >= FD_SETSIZE on posix -- confirm
 * callers never pass such descriptors.
 */
static int
poll(_HANDLE fd, double timeout)
{
    int res;
    fd_set rfds;
    struct timeval tv;
    FD_ZERO(&rfds);
    FD_SET(fd, &rfds);
    if (timeout < 0.0)
        timeout = 0.0;
    /* split the float timeout into whole seconds + microseconds */
    tv.tv_sec = (long)timeout;
    tv.tv_usec = (long)((timeout - tv.tv_sec) * 1e6);
    res = select(fd+1, &rfds, NULL, NULL, &tv);
    if (res < 0) {
#ifdef MS_WINDOWS
        return SOME_SOCKET_ERROR;
#else
        return STANDARD_ERROR;
#endif
    } else if (FD_ISSET(fd, &rfds)) {
        return TRUE;
    } else if (res == 0) {
        return FALSE;
    } else {
        /* res > 0 but our fd not set: should not happen */
#ifdef MS_WINDOWS
        return SELECT_ERROR;
#else
        return STANDARD_ERROR;
#endif
    }
}
#endif /* _SOCKET_CONNECTION_H */
/*
* Windows version of extension module used by `processing` package
*
* win_processing.c
*
* Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
*/
#include "processing_defs.h"
#include "processing.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
extern PyTypeObject BlockerType;
extern PyTypeObject PipeConnectionType;
extern PyTypeObject SocketConnectionType;
extern PyObject *socket_changefd(PyObject *self, PyObject *args);
PyObject *dumpsFunction, *loadsFunction, *BufferTooShort, *socketType;
/*
* Win32 functions
*/
/*
 * CloseHandle(handle) -> None.  Releases the GIL during the call and
 * raises WindowsError on failure.
 * NOTE(review): "k" parses an unsigned long -- assumes handle values
 * fit in 32 bits; confirm for win64 builds.
 */
static PyObject *
win32_CloseHandle(PyObject *self, PyObject *args)
{
    HANDLE hObject;
    BOOL success;
    if (!PyArg_ParseTuple(args, "k", &hObject))
        return NULL;
    Py_BEGIN_ALLOW_THREADS
    success = CloseHandle(hObject);
    Py_END_ALLOW_THREADS
    if (!success)
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}
/*
 * ConnectNamedPipe(handle, overlapped) -> None.  Blocks (GIL released)
 * until a client connects.
 * NOTE(review): `overlapped` is parsed as an integer used as a pointer;
 * callers presumably always pass 0/None-equivalent -- confirm.
 */
static PyObject *
win32_ConnectNamedPipe(PyObject *self, PyObject *args)
{
    HANDLE hNamedPipe;
    LPOVERLAPPED lpOverlapped;
    BOOL success;
    if (!PyArg_ParseTuple(args, "k" N_FMT, &hNamedPipe, &lpOverlapped))
        return NULL;
    Py_BEGIN_ALLOW_THREADS
    success = ConnectNamedPipe(hNamedPipe, lpOverlapped);
    Py_END_ALLOW_THREADS
    if (!success)
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}
/*
 * CreateFile(name, access, sharemode, security, disposition, flags,
 * template) -> handle.  Thin wrapper; used here to open the client end
 * of named pipes.  Raises WindowsError on INVALID_HANDLE_VALUE.
 */
static PyObject *
win32_CreateFile(PyObject *self, PyObject *args)
{
    LPCTSTR lpFileName;
    DWORD dwDesiredAccess;
    DWORD dwShareMode;
    LPSECURITY_ATTRIBUTES lpSecurityAttributes;
    DWORD dwCreationDisposition;
    DWORD dwFlagsAndAttributes;
    HANDLE hTemplateFile;
    HANDLE handle;
    if (!PyArg_ParseTuple(args, "skk" N_FMT "kkk",
                          &lpFileName, &dwDesiredAccess, &dwShareMode,
                          &lpSecurityAttributes, &dwCreationDisposition,
                          &dwFlagsAndAttributes, &hTemplateFile))
        return NULL;
    Py_BEGIN_ALLOW_THREADS
    handle = CreateFile(lpFileName, dwDesiredAccess, dwShareMode,
                        lpSecurityAttributes, dwCreationDisposition,
                        dwFlagsAndAttributes, hTemplateFile);
    Py_END_ALLOW_THREADS
    if (handle == INVALID_HANDLE_VALUE)
        return PyErr_SetFromWindowsErr(0);
    return Py_BuildValue("k", handle);
}
/*
 * CreateNamedPipe(name, openmode, pipemode, maxinstances, outbufsize,
 * inbufsize, timeout, security) -> handle.  Creates the server end of
 * a named pipe; raises WindowsError on INVALID_HANDLE_VALUE.
 */
static PyObject *
win32_CreateNamedPipe(PyObject *self, PyObject *args)
{
    LPCTSTR lpName;
    DWORD dwOpenMode;
    DWORD dwPipeMode;
    DWORD nMaxInstances;
    DWORD nOutBufferSize;
    DWORD nInBufferSize;
    DWORD nDefaultTimeOut;
    LPSECURITY_ATTRIBUTES lpSecurityAttributes;
    HANDLE handle;
    if (!PyArg_ParseTuple(args, "skkkkkk" N_FMT,
                          &lpName, &dwOpenMode, &dwPipeMode, &nMaxInstances,
                          &nOutBufferSize, &nInBufferSize, &nDefaultTimeOut,
                          &lpSecurityAttributes))
        return NULL;
    Py_BEGIN_ALLOW_THREADS
    handle = CreateNamedPipe(lpName, dwOpenMode, dwPipeMode, nMaxInstances,
                             nOutBufferSize, nInBufferSize, nDefaultTimeOut,
                             lpSecurityAttributes);
    Py_END_ALLOW_THREADS
    if (handle == INVALID_HANDLE_VALUE)
        return PyErr_SetFromWindowsErr(0);
    return Py_BuildValue("k", handle);
}
/*
 * DuplicateHandle(srcproc, srchandle, targetproc, access, inherit,
 * options) -> handle.  Duplicates a handle, typically into a child
 * process so it can be inherited; raises WindowsError on failure.
 */
static PyObject *
win32_DuplicateHandle(PyObject *self, PyObject *args)
{
    HANDLE hSourceProcessHandle;
    HANDLE hSourceHandle;
    HANDLE hTargetProcessHandle;
    DWORD dwDesiredAccess;
    BOOL bInheritHandle;
    DWORD dwOptions;
    HANDLE handle;
    if (!PyArg_ParseTuple(args, "kkkkik",
                          &hSourceProcessHandle, &hSourceHandle,
                          &hTargetProcessHandle, &dwDesiredAccess,
                          &bInheritHandle, &dwOptions))
        return NULL;
    if (!DuplicateHandle(hSourceProcessHandle, hSourceHandle,
                         hTargetProcessHandle, &handle,
                         dwDesiredAccess, bInheritHandle, dwOptions))
        return PyErr_SetFromWindowsErr(0);
    return Py_BuildValue("k", handle);
}
/*
 * GenerateConsoleCtrlEvent(event, processgroup) -> None.  Sends
 * CTRL_C/CTRL_BREAK to a process group sharing this console.
 */
static PyObject *
win32_GenerateConsoleCtrlEvent(PyObject *self, PyObject *args)
{
    DWORD dwCtrlEvent;
    DWORD dwProcessGroupId;
    if (!PyArg_ParseTuple(args, "kk", &dwCtrlEvent, &dwProcessGroupId))
        return NULL;
    if (!GenerateConsoleCtrlEvent(dwCtrlEvent, dwProcessGroupId))
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}
/*
 * GetCurrentProcess() -> handle.  Returns the pseudo-handle for the
 * current process (does not need to be closed).
 */
static PyObject *
win32_GetCurrentProcess(PyObject *self, PyObject *args)
{
    return Py_BuildValue("k", GetCurrentProcess());
}
/*
 * OpenProcess(access, inherit, pid) -> handle.  Opens a handle to an
 * existing process; raises WindowsError on failure.
 */
static PyObject *
win32_OpenProcess(PyObject *self, PyObject *args)
{
    DWORD dwDesiredAccess;
    BOOL bInheritHandle;
    DWORD dwProcessId;
    HANDLE handle;
    if (!PyArg_ParseTuple(args, "kik", &dwDesiredAccess, &bInheritHandle,
                          &dwProcessId))
        return NULL;
    handle = OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId);
    if (handle == NULL)
        return PyErr_SetFromWindowsErr(0);
    return Py_BuildValue("k", handle);
}
/*
 * SetConsoleCtrlHandler(routine, add) -> None.
 * NOTE(review): the handler routine is parsed from a python integer and
 * used as a function pointer -- presumably only 0/NULL (to toggle the
 * default Ctrl-C behaviour) is ever passed; confirm against callers.
 */
static PyObject *
win32_SetConsoleCtrlHandler(PyObject *self, PyObject *args)
{
    PHANDLER_ROUTINE HandlerRoutine;
    BOOL Add;
    if (!PyArg_ParseTuple(args, N_FMT "i", &HandlerRoutine, &Add))
        return NULL;
    if (!SetConsoleCtrlHandler(HandlerRoutine, Add))
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}
/*
 * SetHandleInformation(handle, mask, flags) -> None.  Used to set or
 * clear HANDLE_FLAG_INHERIT on handles passed to children.
 */
static PyObject *
win32_SetHandleInformation(PyObject *self, PyObject *args)
{
    HANDLE hObject;
    DWORD dwMask;
    DWORD dwFlags;
    if (!PyArg_ParseTuple(args, "kkk", &hObject, &dwMask, &dwFlags))
        return NULL;
    if (!SetHandleInformation(hObject, dwMask, dwFlags))
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}
/*
 * SetNamedPipeHandleState(handle, mode, None, None) -> None.  Only mode
 * changes are supported: the collection-count/timeout arguments must be
 * None and are passed to the API as NULL.
 */
static PyObject *
win32_SetNamedPipeHandleState(PyObject *self, PyObject *args)
{
    HANDLE hNamedPipe;
    DWORD dwMode;
    PyObject *ignore1, *ignore2;
    if (!PyArg_ParseTuple(args, "kkOO",
                          &hNamedPipe, &dwMode, &ignore1, &ignore2))
        return NULL;
    if (ignore1 != Py_None || ignore2 != Py_None) {
        PyErr_SetString(PyExc_ValueError, "last two arguments must be None");
        return NULL;
    }
    if (!SetNamedPipeHandleState(hNamedPipe, &dwMode, NULL, NULL))
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}
/*
 * TerminateProcess(handle, exitcode) -> None.  Forcibly kills the
 * target process; raises WindowsError on failure.
 */
static PyObject *
win32_TerminateProcess(PyObject *self, PyObject *args)
{
    HANDLE hProcess;
    UINT uExitCode;
    if (!PyArg_ParseTuple(args, "kI", &hProcess, &uExitCode))
        return NULL;
    if (!TerminateProcess(hProcess, uExitCode))
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}
/*
 * WaitNamedPipe(name, timeout) -> None.  Blocks (GIL released) until an
 * instance of the pipe is available for connection or the timeout (in
 * milliseconds, or NMPWAIT_WAIT_FOREVER) expires.
 */
static PyObject *
win32_WaitNamedPipe(PyObject *self, PyObject *args)
{
    LPCTSTR lpNamedPipeName;
    DWORD nTimeOut;
    BOOL success;
    if (!PyArg_ParseTuple(args, "sk", &lpNamedPipeName, &nTimeOut))
        return NULL;
    Py_BEGIN_ALLOW_THREADS
    success = WaitNamedPipe(lpNamedPipeName, nTimeOut);
    Py_END_ALLOW_THREADS
    if (!success)
        return PyErr_SetFromWindowsErr(0);
    Py_RETURN_NONE;
}
/*
*
*/
#define WIN32_FUNCTION(func) \
{#func, (PyCFunction)win32_ ## func, METH_VARARGS, ""}
#define WIN32_CONSTANT(fmt, con) \
PyModule_AddObject(m, #con, Py_BuildValue(fmt, con))
/*
 * Module method table (windows build): buffer helpers plus thin
 * wrappers around the win32 API calls the `processing` package needs.
 * The win32 wrappers carry no docstrings; see MSDN for their semantics.
 */
static PyMethodDef module_methods[] = {
    {"changefd", (PyCFunction)socket_changefd, METH_VARARGS, ""},
    {"rwbuffer", processing_rwbuffer, METH_VARARGS, ""},
    {"address_of_buffer", processing_address_of_buffer, METH_O, ""},
    WIN32_FUNCTION(CloseHandle),
    WIN32_FUNCTION(ConnectNamedPipe),
    WIN32_FUNCTION(CreateFile),
    WIN32_FUNCTION(CreateNamedPipe),
    WIN32_FUNCTION(DuplicateHandle),
    WIN32_FUNCTION(GenerateConsoleCtrlEvent),
    WIN32_FUNCTION(GetCurrentProcess),
    WIN32_FUNCTION(OpenProcess),
    WIN32_FUNCTION(SetConsoleCtrlHandler),
    WIN32_FUNCTION(SetHandleInformation),
    WIN32_FUNCTION(SetNamedPipeHandleState),
    WIN32_FUNCTION(TerminateProcess),
    WIN32_FUNCTION(WaitNamedPipe),
    {NULL, NULL}                           /* sentinel */
};
/*
 * Module initialization (windows build): create `_processing`, export
 * the win32 constants, cache cPickle/_socket entry points, register the
 * BufferTooShort exception and the connection/Blocker types.
 *
 * Fixes relative to the original: the exception's qualified name read
 * "_processing.BufferToShort" (typo) while being exposed as attribute
 * "BufferTooShort"; several results were used without NULL checks.
 */
PyMODINIT_FUNC
init_processing(void)
{
    PyObject *m, *other_module;
    /*
     * Initialize module
     */
    m = Py_InitModule("_processing", module_methods);
    if (m == NULL)
        return;
    /*
     * Add win32 constants
     */
    WIN32_CONSTANT("k", DUPLICATE_SAME_ACCESS);
    WIN32_CONSTANT("k", ERROR_PIPE_BUSY);
    WIN32_CONSTANT("k", ERROR_PIPE_CONNECTED);
    WIN32_CONSTANT("k", ERROR_SEM_TIMEOUT);
    WIN32_CONSTANT("k", GENERIC_READ);
    WIN32_CONSTANT("k", GENERIC_WRITE);
    WIN32_CONSTANT("k", HANDLE_FLAG_INHERIT);
    WIN32_CONSTANT("k", NMPWAIT_WAIT_FOREVER);
    WIN32_CONSTANT("k", OPEN_EXISTING);
    WIN32_CONSTANT("k", PIPE_ACCESS_DUPLEX);
    WIN32_CONSTANT("k", PIPE_ACCESS_INBOUND);
    WIN32_CONSTANT("k", PIPE_ACCESS_OUTBOUND);
    WIN32_CONSTANT("k", PIPE_READMODE_MESSAGE);
    WIN32_CONSTANT("k", PIPE_TYPE_MESSAGE);
    WIN32_CONSTANT("k", PIPE_UNLIMITED_INSTANCES);
    WIN32_CONSTANT("k", PIPE_WAIT);
    WIN32_CONSTANT("k", PROCESS_ALL_ACCESS);
    WIN32_CONSTANT("k", NULL);     /* exposes a `_processing.NULL` constant */
    /*
     * Get copies of `cPickle.dumps` and `cPickle.loads`
     */
    other_module = PyImport_ImportModule("cPickle");
    if (!other_module)
        return;
    dumpsFunction = PyObject_GetAttrString(other_module, "dumps");
    loadsFunction = PyObject_GetAttrString(other_module, "loads");
    Py_DECREF(other_module);
    if (!dumpsFunction || !loadsFunction)
        return;
    /*
     * Get copy of `_socket.socket`
     */
    other_module = PyImport_ImportModule("_socket");
    if (!other_module)
        return;
    socketType = PyObject_GetAttrString(other_module, "socket");
    Py_DECREF(other_module);
    if (!socketType)
        return;
    /*
     * Add exception to module
     */
    BufferTooShort = PyErr_NewException("_processing.BufferTooShort",
                                        NULL, NULL);
    if (!BufferTooShort)
        return;
    Py_INCREF(BufferTooShort);
    PyModule_AddObject(m, "BufferTooShort", BufferTooShort);
    /*
     * Add type objects to module
     */
    if (PyType_Ready(&PipeConnectionType) < 0)
        return;
    Py_INCREF(&PipeConnectionType);
    PyModule_AddObject(m, "PipeConnection", (PyObject*)&PipeConnectionType);
    if (PyType_Ready(&SocketConnectionType) < 0)
        return;
    Py_INCREF(&SocketConnectionType);
    PyModule_AddObject(m, "Connection", (PyObject*)&SocketConnectionType);
    if (PyType_Ready(&BlockerType) < 0)
        return;
    Py_INCREF(&BlockerType);
    PyModule_AddObject(m, "Blocker", (PyObject*)&BlockerType);
}
/*
* A type which wraps a windows mutex or semaphore
*
* win_semaphore.c
*
* Copyright (c) 2006, 2007, R Oudkerk --- see COPYING.txt
*/
#include "processing_defs.h"
#include "pythread.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
enum { MUTEX, RECURSIVE_MUTEX, SEMAPHORE, BOUNDED_SEMAPHORE };
#define IS_MUTEX(self) ((self)->kind < SEMAPHORE)
/*
 * Per-instance state for a Blocker wrapping a windows mutex/semaphore.
 *
 * Fix: `handle` was declared `HANDLE *`, but every use in this file
 * stores and passes a plain HANDLE (CreateMutex/CreateSemaphore return
 * HANDLE, CloseHandle/WaitForSingleObject take HANDLE); the declaration
 * now matches the usage.
 */
typedef struct {
    PyObject_HEAD
    char *name;          /* malloc'ed copy of the kernel object's name */
    HANDLE handle;       /* win32 handle, or NULL once closed */
    int kind;            /* MUTEX, RECURSIVE_MUTEX, SEMAPHORE, ... */
    int count;           /* acquires minus releases in this process */
    long last_tid;       /* thread that last acquired the mutex */
} Blocker;
PyTypeObject BlockerType;
/*
 * tp_new: allocate a Blocker with its resources zeroed so that
 * sync_dealloc() is safe even if sync_init() is never reached.
 */
static PyObject *
sync_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
    Blocker *obj = (Blocker*)type->tp_alloc(type, 0);

    if (obj == NULL)
        return NULL;
    obj->name = NULL;
    obj->handle = NULL;
    return (PyObject*)obj;
}
/*
 * __init__(name, create, kind, value): create or open the named windows
 * mutex/semaphore backing this Blocker.
 *
 * Bug fix: when creating and the name already existed, the original
 * closed the handle and set an exception but then fell through and
 * returned 0 (success) with a stale, closed, non-NULL handle.  Creation
 * is meant to be exclusive; fail properly instead.
 */
static int
sync_init(Blocker *self, PyObject *args, PyObject *kwds)
{
    char *name;
    int create, kind, value = -1;
    static char *kwlist[] = {"name", "create", "kind", "value", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "siii", kwlist,
                                     &name, &create, &kind, &value))
        return -1;

    /* keep a private copy of the name; freed in sync_dealloc() */
    self->name = malloc(strlen(name)+1);
    if (self->name == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    memcpy(self->name, name, strlen(name)+1);
    self->kind = kind;
    self->count = 0;
    self->last_tid = 0;

    if (create) {
        if (IS_MUTEX(self))
            self->handle = CreateMutex(NULL, FALSE, name);
        else if (self->kind == BOUNDED_SEMAPHORE)
            self->handle = CreateSemaphore(NULL, value, value, name);
        else
            self->handle = CreateSemaphore(NULL, value, 0x7FFFFFFF, name);
        /* Create* returns a handle to the OLD object (with
           ERROR_ALREADY_EXISTS) if the name is in use -- treat that as
           failure, matching the posix O_EXCL behaviour */
        if (self->handle != NULL &&
            GetLastError() == ERROR_ALREADY_EXISTS) {
            PyErr_SetFromWindowsErr(ERROR_ALREADY_EXISTS);
            CloseHandle(self->handle);
            self->handle = NULL;
            return -1;
        }
    } else {
        if (IS_MUTEX(self))
            self->handle = OpenMutex(MUTEX_ALL_ACCESS, FALSE, name);
        else
            self->handle = OpenSemaphore(SEMAPHORE_ALL_ACCESS, FALSE, name);
    }
    if (self->handle == NULL) {
        PyErr_SetFromWindowsErr(0);
        return -1;
    }
    return 0;
}
/*
 * _close(): close the win32 handle.  Idempotent -- a second call is a
 * no-op.  Raises WindowsError if CloseHandle() fails.
 */
static PyObject *
sync_close(Blocker *self)
{
    if (self->handle != NULL) {
        HANDLE h = self->handle;
        self->handle = NULL;
        if (!CloseHandle(h))
            return PyErr_SetFromWindowsErr(0);
    }
    Py_RETURN_NONE;
}
/*
 * tp_dealloc: close the handle and free the name copy.
 *
 * An exception must never escape a deallocator, but sync_close() can
 * set one (the original merely discarded its return value, leaving the
 * error pending).  Preserve any exception already in flight and drop
 * whatever sync_close() raises.
 */
static void
sync_dealloc(Blocker* self)
{
    PyObject *error_type, *error_value, *error_traceback;

    PyErr_Fetch(&error_type, &error_value, &error_traceback);
    Py_XDECREF(sync_close(self));
    PyErr_Clear();                    /* discard any error from sync_close */
    PyErr_Restore(error_type, error_value, error_traceback);

    free(self->name);
    self->name = NULL;
    self->ob_type->tp_free((PyObject*)self);
}
/*
 * WaitForSingleObject() with periodic wake-ups so python signal
 * handlers (e.g. KeyboardInterrupt) can run: wait in slices of at most
 * one second, calling PyErr_CheckSignals() between slices.  `timeout`
 * is in milliseconds; INFINITE loops until the object is signalled.
 *
 * Returns the WaitForSingleObject result (WAIT_OBJECT_0,
 * WAIT_ABANDONED, WAIT_TIMEOUT, WAIT_FAILED) or -2 if a signal handler
 * raised an exception.
 */
static int
wait_and_check_signals(HANDLE h, DWORD timeout)
{
    DWORD delta, res;
    if (timeout == 0)
        return WaitForSingleObject(h, 0);   /* pure non-blocking probe */
    do {
        delta = timeout > 1000 ? 1000 : timeout;
        Py_BEGIN_ALLOW_THREADS
        res = WaitForSingleObject(h, delta);
        Py_END_ALLOW_THREADS
        if (res != WAIT_TIMEOUT)
            return res;
        if (PyErr_CheckSignals())
            return -2;                      /* exception set by handler */
        /* INFINITE is never decremented, so the loop runs forever */
        if (timeout != INFINITE)
            timeout -= delta;
    } while (timeout > 0);
    return WAIT_TIMEOUT;
}
/*
 * Common implementation of acquire()/acquire_timeout().
 *
 * `timeout` is in milliseconds: 0 is a non-blocking attempt, INFINITE
 * blocks forever.  Returns True/False for acquired/timed out, or NULL
 * with an exception set.  WAIT_ABANDONED (previous owner died while
 * holding the mutex) is treated as a successful acquire.
 */
static PyObject *
_acquire(Blocker *self, DWORD timeout)
{
    DWORD res;
    if (self->handle == NULL) {
        PyErr_SetString(PyExc_AssertionError, "semaphore/mutex is closed");
        return NULL;
    }
    /* a non-recursive mutex must not be re-acquired by its owner */
    if (self->kind == MUTEX && self->count > 0
        && self->last_tid == PyThread_get_thread_ident()) {
        PyErr_SetString(PyExc_AssertionError, "attempt to acquire an owned "
                        "non-recusive lock");
        return NULL;
    }
    res = wait_and_check_signals(self->handle, timeout);
    switch (res) {
    case WAIT_TIMEOUT:
        Py_RETURN_FALSE;
    case WAIT_OBJECT_0:
    case WAIT_ABANDONED:
        /* record ownership for _ismine()/release() checks */
        self->last_tid = PyThread_get_thread_ident();
        ++self->count;
        Py_RETURN_TRUE;
    case WAIT_FAILED:
        return PyErr_SetFromWindowsErr(0);
    case -2:
        return NULL;                   /* signal handler raised */
    default:
        PyErr_SetString(PyExc_AssertionError,
                        "WaitForSingleObject gave unrecognized value");
        return NULL;
    }
}
/*
 * acquire([blocking=1]) -> bool: blocking maps to an INFINITE wait,
 * non-blocking to a zero-timeout probe.
 */
static PyObject *
sync_acquire(Blocker *self, PyObject *args)
{
    int blocking = 1;

    if (!PyArg_ParseTuple(args, "|i", &blocking))
        return NULL;
    return _acquire(self, blocking ? INFINITE : 0);
}
/*
 * acquire_timeout([timeout=0.0]) -> bool: timeout is in seconds and is
 * clamped to zero before being converted to milliseconds.
 */
static PyObject *
sync_acquire_timeout(Blocker *self, PyObject *args)
{
    double seconds = 0.0;

    if (!PyArg_ParseTuple(args, "|d", &seconds))
        return NULL;
    if (seconds < 0.0)
        seconds = 0.0;
    return _acquire(self, (DWORD)(seconds * 1000));
}
/*
 * release() -> None: release the mutex or post the semaphore once.
 * ERROR_TOO_MANY_POSTS (bounded semaphore over-released) becomes a
 * ValueError; other failures raise WindowsError.
 */
static PyObject *
sync_release(Blocker *self)
{
    BOOL ok;

    if (self->handle == NULL) {
        PyErr_SetString(PyExc_AssertionError, "semaphore/mutex is closed");
        return NULL;
    }
    ok = IS_MUTEX(self) ? ReleaseMutex(self->handle)
                        : ReleaseSemaphore(self->handle, 1, NULL);
    if (!ok) {
        if (GetLastError() != ERROR_TOO_MANY_POSTS)
            return PyErr_SetFromWindowsErr(0);
        PyErr_SetString(PyExc_ValueError,
                        "Semaphore released too many times");
        return NULL;
    }
    --self->count;
    Py_RETURN_NONE;
}
/*
 * _getvalue() -> int
 *
 * Windows offers no direct query for a mutex/semaphore value, so probe
 * with a zero-timeout wait and immediately undo the acquire.  For a
 * mutex the result is 1 (free) or 0 (held); for a semaphore it is the
 * previous count reported by ReleaseSemaphore() plus one.
 */
static PyObject *
sync_getvalue(Blocker *self)
{
    long previous;
    if (self->handle == NULL) {
        PyErr_SetString(PyExc_AssertionError, "semaphore/mutex is closed");
        return NULL;
    }
    if (IS_MUTEX(self)) {
        /* held by this process: no need to probe the kernel object */
        if (self->count > 0)
            return Py_BuildValue("i", 0);
        switch (WaitForSingleObject(self->handle, 0)) {
        case WAIT_OBJECT_0:
        case WAIT_ABANDONED:
            /* we just acquired it, so it was free: give it back */
            if (ReleaseMutex(self->handle))
                return Py_BuildValue("i", 1);
            else
                return PyErr_SetFromWindowsErr(0);
        case WAIT_TIMEOUT:
            return Py_BuildValue("i", 0);
        case WAIT_FAILED:
            return PyErr_SetFromWindowsErr(0);
        }
    } else {
        switch (WaitForSingleObject(self->handle, 0)) {
        case WAIT_OBJECT_0:
            /* undo the decrement; `previous` is the value after it */
            if (ReleaseSemaphore(self->handle, 1, &previous))
                return Py_BuildValue("i", previous+1);
            else
                return PyErr_SetFromWindowsErr(0);
        case WAIT_TIMEOUT:
            return Py_BuildValue("i", 0);
        case WAIT_FAILED:
            return PyErr_SetFromWindowsErr(0);
        }
    }
    /* unreachable unless WaitForSingleObject returns something new */
    PyErr_SetString(PyExc_AssertionError, "unexpected value");
    return NULL;
}
/*
 * _ismine() -> bool: whether the calling thread owns the mutex.
 * Semaphores have no owner and raise NotImplementedError.
 */
static PyObject *
sync_ismine(Blocker *self)
{
    if (self->handle == NULL) {
        PyErr_SetString(PyExc_AssertionError, "semaphore/mutex is closed");
        return NULL;
    }
    if (!IS_MUTEX(self)) {
        PyErr_SetString(PyExc_NotImplementedError, "not a mutex");
        return NULL;
    }
    return Py_BuildValue("i",
        self->count > 0 && self->last_tid == PyThread_get_thread_ident());
}
/*
 * _count() -> int: acquires minus releases performed by this process.
 */
static PyObject *
sync_count(Blocker *self)
{
    if (self->handle != NULL)
        return Py_BuildValue("i", self->count);
    PyErr_SetString(PyExc_AssertionError, "semaphore/mutex is closed");
    return NULL;
}
/*
 * Method table for the windows Blocker type (note: no _unlink here --
 * windows kernel objects disappear when the last handle is closed).
 */
static PyMethodDef sync_methods[] = {
    {"acquire", (PyCFunction)sync_acquire, METH_VARARGS,
     "acquire the semaphore/mutex"},
    {"release", (PyCFunction)sync_release, METH_NOARGS,
     "release the semaphore/mutex"},
    {"acquire_timeout", (PyCFunction)sync_acquire_timeout, METH_VARARGS,
     "acquire the semaphore/mutex using a timeout"},
    {"_count", (PyCFunction)sync_count, METH_NOARGS,
     "number of `acquire()`s minus number of `release()`s for this process"},
    {"_ismine", (PyCFunction)sync_ismine, METH_NOARGS,
     "whether the mutex is owned by this thread"},
    {"_getvalue", (PyCFunction)sync_getvalue, METH_NOARGS,
     "get the value of the semaphore"},
    {"_close", (PyCFunction)sync_close, METH_NOARGS,
     "close the semaphore/mutex"},
    {NULL}                                 /* sentinel */
};
#ifndef PyMODINIT_FUNC  /* declarations for DLL import/export */
#define PyMODINIT_FUNC void
#endif

/*
 * Type object for `_processing.Blocker` -- the common implementation of
 * semaphores and mutexes; subclassable (Py_TPFLAGS_BASETYPE is set).
 */
PyTypeObject BlockerType = {
    PyObject_HEAD_INIT(NULL)
    0,                          /* ob_size */
    "_processing.Blocker",      /* tp_name */
    sizeof(Blocker),            /* tp_basicsize */
    0,                          /* tp_itemsize */
    (destructor)sync_dealloc,   /* tp_dealloc */
    0,                          /* tp_print */
    0,                          /* tp_getattr */
    0,                          /* tp_setattr */
    0,                          /* tp_compare */
    0,                          /* tp_repr */
    0,                          /* tp_as_number */
    0,                          /* tp_as_sequence */
    0,                          /* tp_as_mapping */
    0,                          /* tp_hash */
    0,                          /* tp_call */
    0,                          /* tp_str */
    0,                          /* tp_getattro */
    0,                          /* tp_setattro */
    0,                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
                                /* tp_flags */
    "Semaphore/Mutex type",     /* tp_doc */
    0,                          /* tp_traverse */
    0,                          /* tp_clear */
    0,                          /* tp_richcompare */
    0,                          /* tp_weaklistoffset */
    0,                          /* tp_iter */
    0,                          /* tp_iternext */
    sync_methods,               /* tp_methods */
    0,                          /* tp_members */
    0,                          /* tp_getset */
    0,                          /* tp_base */
    0,                          /* tp_dict */
    0,                          /* tp_descr_get */
    0,                          /* tp_descr_set */
    0,                          /* tp_dictoffset */
    (initproc)sync_init,        /* tp_init */
    0,                          /* tp_alloc */
    (newfunc)sync_new,          /* tp_new */
};
#
# Run tests in the modules of this sub-package
#
from processing import activeChildren, freezeSupport, HAVE_NATIVE_SEMAPHORE
def run_test(module, types):
    """Run `module.test()` once per configuration name in `types`.

    The module's `config` attribute is set and the module reloaded so
    that its top-level import switch picks up the new configuration.
    """
    for type in types:
        if type == 'processes+server':
            print '\n\n------ %s using processes and server process ------\n' \
                  % module.__name__
        elif type == 'processes':
            print '\n\n------ %s using processes ------\n' \
                  % module.__name__
        elif type == 'threads':
            print '\n\n------ %s using threads ------\n' % module.__name__
        else:
            raise ValueError
        module.config = type
        # module globals survive reload(), so the module sees the new config
        reload(module)
        module.test()
def main():
    """Run every test module of the sub-package in each supported mode.

    Modes needing native semaphores are skipped when unavailable.
    Asserts that no test leaves stray child processes behind.
    """
    # NOTE(review): test_speed is imported but never run here -- presumably
    # only an importability check; confirm before removing.
    from processing.test import test_processing, test_newtype, \
         test_doc, test_speed, test_connection, test_reduction, test_stop

    old_processes = set(activeChildren())

    run_test(test_doc, ['processes+server', 'threads'])
    run_test(test_connection, ['processes', 'threads'])
    run_test(test_newtype, ['processes', 'threads'])
    run_test(test_reduction, ['processes', 'threads'])
    run_test(test_stop, ['processes'])

    if HAVE_NATIVE_SEMAPHORE:
        from processing.test import test_workers, test_pool
        run_test(test_processing, ['processes', 'processes+server', 'threads'])
        run_test(test_workers, ['processes'])
        run_test(test_pool, ['processes'])
    else:
        run_test(test_processing, ['processes+server', 'threads'])

    # the tests must not have created processes that are still alive
    processes = set(activeChildren())
    assert processes.issubset(old_processes)
if __name__ == '__main__':
    freezeSupport()    # needed when frozen into a Windows executable
    main()
# Select the real `processing` package or the thread-based `dummy`
# version, depending on the `config` global set by the test driver.
config = globals().get('config', 'processes')

if config == 'processes':
    from processing import *
    from processing.connection import Listener, Client, families
elif config == 'threads':
    from processing.dummy import *
    from processing.dummy.connection import Listener, Client, families
else:
    raise ValueError

import socket, threading

#
# Shared test data
#
long_list = range(1000)
long_string = str(long_list)
length = len(long_string)
#
# Test functions for connection objects
#
def foo(address):
    """Child process: connect to `address` and send the shared test data.

    Sends `long_string` a second time when the connection type supports
    `recvbytes_into()`, matching the extra read done by `test_conn()`.
    """
    conn = Client(address)
    conn.send(long_list)
    conn.sendbytes(long_string)
    if hasattr(conn, 'recvbytes_into'):
        conn.sendbytes(long_string)
def test_conn(conn, use_wrapped_socket=False):
    """Check recv()/recvbytes()/recvbytes_into() against data sent by foo().

    `use_wrapped_socket` is currently unused.
    """
    obj = conn.recv()
    assert obj == long_list
    print 'Test passed: send() / recv()'

    s = conn.recvbytes()
    assert s == long_string
    print 'Test passed: sendbytes() / recvbytes()'

    if hasattr(conn, 'recvbytes_into'):
        import array
        # deliberately over-sized buffer; only `length` bytes should land
        buf = array.array('c', '\0' * (2*length))
        bytes_read = conn.recvbytes_into(buf)
        assert bytes_read == length
        assert buf[:length].tostring() == long_string
        print 'Test passed: sendbytes() / recvbytes_into()'
#
# Test functions for socket objects produced by `socket.fromfd()`
#
def bar(address):
    """Child: rewrap the connection's fd as a socket and exercise three
    different ways of writing the test string over it."""
    conn = Client(address)
    sock = socket.fromfd(conn.fileno(), socket.AF_INET, socket.SOCK_STREAM)
    sock = socket.socket(_sock=sock)    # wrap the raw _socket object

    # 1) one-shot sendall()
    sock.sendall(long_string)

    # 2) piecewise send(), honouring partial writes
    s = long_string
    while s:
        n = sock.send(s)
        s = s[n:]

    # 3) file-like interface via makefile()
    f = sock.makefile()
    f.write(long_string + '\n')
    f.write('the end\n')
    f.flush()
    del f

    sock.close()
def test_fromfd(fd):
    """Parent-side checks for the three write styles used by bar(),
    followed by a check that recv() returns '' once the peer closes."""
    sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
    sock = socket.socket(_sock=sock)

    # read back the sendall() payload
    remaining = length
    s = ''
    while remaining > 0:
        bufsize = min(8192, remaining)
        temp = sock.recv(bufsize)
        s += temp
        remaining -= len(temp)
    assert s == long_string, (len(s), len(long_string))
    print 'Test passed: sendall() / recv()'

    # read back the piecewise send() payload
    remaining = length
    s = ''
    while remaining > 0:
        bufsize = min(8192, remaining)
        temp = sock.recv(bufsize)
        s += temp
        remaining -= len(temp)
    assert s == long_string, (len(s), len(long_string))
    print 'Test passed: send() / recv()'

    # read back the makefile() payload line by line
    f = sock.makefile()
    s = f.readline()
    assert s == long_string + '\n'
    s = f.readline()
    assert s == 'the end\n', s
    del f
    print 'Test passed: socket.makefile()'

    # after the peer closed, recv() must keep returning ''
    for i in range(3):
        temp = sock.recv(8192)
        assert temp == '', 'temp = %r' % temp
    print 'Test passed: sock.recv(...) == "" at EOF'
#
#
#
def test():
    """Run the connection tests for every supported address family, then
    (for real processes only) the socket.fromfd() based tests."""
    for fam in families:
        print '\tUsing family=%r\n' % fam
        l = Listener(family=fam)
        p = Process(target=foo, args=[l.address])
        p.start()
        conn = l.accept()
        test_conn(conn)
        p.join()
        print

    # fd passing only makes sense with genuine processes, not threads
    if hasattr(socket, 'fromfd') and not issubclass(Process, threading.Thread):
        print '\tTesting socket.fromfd()\n'
        l = socket.socket()
        l.bind(('localhost', 0))    # let the OS pick a free port
        l.listen(1)
        address = l.getsockname()
        p = Process(target=bar, args=[address])
        p.start()
        conn, _ = l.accept()
        test_fromfd(conn.fileno())
        p.join()
        print
if __name__ == '__main__':
    freezeSupport()    # needed when frozen into a Windows executable
    test()
import processing.test

# choose between a server-process manager and the thread-based dummy
config = globals().get('config', 'processes+server')

if config == 'processes+server':
    from processing import *
elif config == 'threads':
    from processing.dummy import *
else:
    raise ValueError
def test_list():
    # Doctest for proxied list objects created by the manager.
    '''
    >>> a = manager.list(range(10))
    >>> print a
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> b = manager.list()
    >>> print b
    []
    >>> b.extend(range(5))
    >>> print b
    [0, 1, 2, 3, 4]
    >>> b[2]
    2
    >>> b[2:10]
    [2, 3, 4]
    >>> b *= 2
    >>> print b
    [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
    >>> b + [5, 6]
    [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]
    >>> a == manager.list(range(10))
    True
    >>> a == range(10)
    True
    >>> a == range(11)
    False
    >>> a > range(9)
    True
    >>> range(11) > a
    True
    >>> d = [a, b]
    >>> e = manager.list(d)
    >>> print e
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
    >>> it = iter(a)
    >>> tuple(it)
    (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
    >>> f = manager.list([a])
    >>> print f
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
    >>> a.append('hello')
    >>> print f
    [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']]
    '''
def test_dict():
    # Doctest for proxied dict objects created by the manager.
    '''
    >>> d = manager.dict()
    >>> for i in range(8):
    ...     d[i] = chr(65 + i)
    ...
    >>> print d.copy()
    {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H'}
    >>> for item in d.iteritems():
    ...     print item,
    ...
    (0, 'A') (1, 'B') (2, 'C') (3, 'D') (4, 'E') (5, 'F') (6, 'G') (7, 'H')
    '''
def test_namespace():
    # Doctest for proxied Namespace objects (attribute get/set/delete).
    '''
    >>> n = manager.Namespace()
    >>> n.name = 'Bob'
    >>> n.job = 'Builder'
    >>> print n # doctest: +ELLIPSIS
    Namespace(...='...', ...='...')
    >>> hasattr(n, 'job')
    True
    >>> del n.job
    >>> print n
    Namespace(name='Bob')
    >>> hasattr(n, 'job')
    False
    '''
def test_bigdata():
    # Round-trip a large object through the manager process.
    '''
    >>> n = manager.Namespace()
    >>> x = range(100000)
    >>> n.value = x
    >>> y = n.value
    >>> x == y
    True
    '''
def test_process_repr_getExitCode():
    # repr() and getExitCode() across the initial/started/stopped states.
    '''
    >>> import time
    >>> p = Process(target=time.sleep, args=[0.1])
    >>> print p, p.getExitCode() # doctest: +ELLIPSIS
    <...Process(..., initial)> None
    >>> p.start()
    >>> print p, p.getExitCode() # doctest: +ELLIPSIS
    <...Process(..., started)> None
    >>> p.join()
    >>> print p, p.getExitCode() # doctest: +ELLIPSIS
    <...Process(..., stopped)> 0
    '''
def test_condition_repr(cond):
    # The docstring runs in the parent; the body below runs in the child
    # process spawned by the doctest, synchronising with the parent.
    '''
    >>> import time
    >>> cond = manager.Condition()
    >>> print cond
    <Condition(<_RLock(None, 0)>, 0)>
    >>> cond.acquire()
    True
    >>> print cond # doctest: +ELLIPSIS
    <Condition(<_RLock(Main..., 1)>, 0)>
    >>> p = Process(target=test_condition_repr, args=[cond])
    >>> p.start()
    >>> cond.wait()
    >>> time.sleep(0.1)
    >>> print cond # doctest: +ELLIPSIS
    <Condition(<_RLock(Main..., 1)>, 1)>
    >>> cond.notify()
    >>> cond.release()
    '''
    # child: wake the waiting parent, then wait to be notified back
    cond.acquire()
    cond.notify()
    cond.wait()
    cond.release()
class Subclass(Process):
    # Process subclass that squares every element of a shared list.
    '''
    >>> l = manager.list([2, 8, 16])
    >>> p = Subclass(l)
    >>> p.start()
    >>> p.join()
    >>> print l
    [4, 64, 256]
    '''
    def __init__(self, data):
        Process.__init__(self)
        self.data = data
    def run(self):
        # square each element of the (proxied) list in place
        for i in range(len(self.data)):
            self.data[i] **= 2
def test_recursion(output, level=3):
    # Each process appends itself (indented by depth) to the shared list,
    # then recursively spawns and joins two children.  The expected output
    # indentation below mirrors ' ' * (3-level) for a depth-3 binary tree.
    '''
    >>> output = manager.list()
    >>> test_recursion(output)
    >>> for line in output[:]:
    ...     print line # doctest: +ELLIPSIS
    ...
    <_Main...(Main..., started)>
     <...Process(...-..., started)>
      <...Process(...-..., started)>
       <...Process(...-..., started)>
       <...Process(...-..., started)>
      <...Process(...-..., started)>
       <...Process(...-..., started)>
       <...Process(...-..., started)>
     <...Process(...-..., started)>
      <...Process(...-..., started)>
       <...Process(...-..., started)>
       <...Process(...-..., started)>
      <...Process(...-..., started)>
       <...Process(...-..., started)>
       <...Process(...-..., started)>
    '''
    output.append(' ' * (3-level) + str(currentProcess()))
    if level > 0:
        for i in range(2):
            p = Process(target=test_recursion, args=[output, level-1])
            p.start()
            p.join()
def test_activeChildren():
    # activeChildren() contains a process only while it is running.
    '''
    >>> import time
    >>> type(activeChildren())
    <type 'list'>
    >>> p = Process(target=time.sleep, args=[0.1])
    >>> p in activeChildren()
    False
    >>> p.start()
    >>> p in activeChildren()
    True
    >>> p.join()
    >>> p in activeChildren()
    False
    '''
def test_cpuCount():
    # cpuCount() returns a positive int (or raises NotImplementedError).
    '''
    >>> try: cpus = cpuCount()
    ... except NotImplementedError: cpus = 1
    ...
    >>> type(cpus) is int
    True
    >>> cpus >= 1
    True
    '''
def test(verbose=False):
    """Create the module-level `manager` and run all doctests above."""
    global manager
    import doctest, sys
    reload(doctest)    # prevent warnings from `DocTestRunner.merge()`
    manager = Manager()
    if sys.version_info >= (2, 4, 0):
        # exclude_empty only exists from Python 2.4 onwards
        res = doctest.testmod(sys.modules[__name__],
                              verbose=verbose, exclude_empty=True)
    else:
        res = doctest.testmod(sys.modules[__name__], verbose=verbose)
    if not verbose:
        doctest.master.summarize(verbose=True)
if __name__ == '__main__':
    freezeSupport()    # needed when frozen into a Windows executable
    test(verbose=True)
#
# Example where a pool of http servers share a single listening socket
#
# On Windows this module depends on the ability to pickle a socket
# object so that the worker processes can inherit a copy of the server
# object. (We import `processing.reduction` to enable this pickling.)
#
# Not sure if we should synchronize access to `socket.accept()` method by
# using a process-shared lock -- does not seem to be necessary.
#
import os
import sys
from processing import Process, currentProcess, freezeSupport
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
if sys.platform == 'win32':
    import processing.reduction    # make sockets picklable/inheritable

def note(format, *args):
    """Write a message to stderr tagged with the current process name."""
    sys.stderr.write('[%s]\t%s\n' % (currentProcess().getName(), format%args))
class RequestHandler(SimpleHTTPRequestHandler):
    # we override log_message() to show which process is handling the request
    def log_message(self, format, *args):
        note(format, *args)
def serve_forever(server):
    """Service requests until interrupted; exit quietly on Ctrl-C."""
    note('starting server')
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
def runpool(address, number_of_processes):
    """Serve HTTP at `address` from several processes sharing one socket."""
    # create a single server object -- children will each inherit a copy
    server = HTTPServer(address, RequestHandler)

    # create child processes to act as workers
    for i in range(number_of_processes-1):
        Process(target=serve_forever, args=[server]).start()

    # main process also acts as a worker
    serve_forever(server)
if __name__ == '__main__':
    freezeSupport()    # needed when frozen into a Windows executable
    DIR = os.path.join(os.path.dirname(__file__), '..')
    ADDRESS = ('localhost', 8000)
    NUMBER_OF_PROCESSES = 4

    print 'Serving at http://%s:%d using %d worker processes' % \
          (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)
    print 'To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']

    os.chdir(DIR)    # serve files from the package's parent directory
    runpool(ADDRESS, NUMBER_OF_PROCESSES)
#
# This module shows how to use arbitrary callables with a subclass of
# `BaseManager`.
#
import processing.test

# pick the real package or the thread-based dummy, per the test driver
config = globals().get('config', 'processes')

if config == 'processes':
    from processing import *
    from processing.managers import *
elif config == 'threads':
    from processing.dummy import *
    from processing.dummy.managers import *
else:
    raise ValueError
##
class Foo(object):
    """Example object whose methods will be exposed through proxies."""
    def f(self):
        print 'you called Foo.f()'
    def g(self):
        print 'you called Foo.g()'
    def _h(self):
        # underscore-prefixed: only exposed when listed explicitly
        print 'you called Foo._h()'
# A simple generator function
def baz():
    """Generator yielding the squares 0, 1, 4, ..., 81."""
    n = 0
    while n < 10:
        yield n * n
        n += 1
# Proxy type for generator objects
class GeneratorProxy(BaseProxy):
    """Proxy type for generator objects: iterable, forwards next()."""
    def __iter__(self):
        return self
    def next(self):
        # delegate to the referent generator in the manager process
        return self._callmethod('next')
##
class MyManager(BaseManager):
    """Manager whose server process can create Foo and generator objects."""
    # register the Foo class; make all public methods accessible via proxy
    Foo1 = CreatorMethod(Foo)
    # register the Foo class; make only `g()` and `_h()` accessible via proxy
    Foo2 = CreatorMethod(Foo, exposed=('g', '_h'))
    # register the generator function baz; use `GeneratorProxy` to make proxies
    baz = CreatorMethod(baz, proxytype=GeneratorProxy)
##
def test():
    """Exercise the three creator methods registered on MyManager."""
    manager = MyManager()
    manager.start()

    print '-' * 20
    f1 = manager.Foo1()
    f1.f()
    f1.g()

    print '-' * 20
    f2 = manager.Foo2()
    f2.g()
    f2._h()    # accessible because it was listed in `exposed`

    print '-' * 20
    it = manager.baz()
    for i in it:
        print '<%d>' % i,
    print
##
if __name__ == '__main__':
    freezeSupport()    # needed when frozen into a Windows executable
    test()
#
# A test of `processing.Pool` class
#
from processing import Pool, TimeoutError
from processing import cpuCount, currentProcess, freezeSupport, activeChildren
import time, random, sys
#
# Functions used by test code
#
def calculate(func, args):
    """Apply `func` to `args` and format a report naming the worker."""
    result = func(*args)
    return '%s says that %s%s = %s' % \
           (currentProcess().getName(), func.__name__, args, result)
def calculatestar(args):
    """Helper so Pool.map/imap can call calculate() with a (func, args) pair."""
    func, funcargs = args
    return calculate(func, funcargs)
def mul(a, b):
    """Multiply after a random pause (simulates a slow task)."""
    delay = 0.5 * random.random()
    time.sleep(delay)
    return a * b
def plus(a, b):
    """Add after a random pause (simulates a slow task)."""
    delay = 0.5 * random.random()
    time.sleep(delay)
    return a + b
def f(x):
    """Reciprocal of x - 5.0; raises ZeroDivisionError when x == 5."""
    denominator = x - 5.0
    return 1.0 / denominator
def pow3(x):
    """Return the cube of x."""
    cubed = x ** 3
    return cubed
def noop(x):
    """Do nothing; used to benchmark raw task-dispatch overhead."""
    return None
#
# Test code
#
def test():
    """Exhaustively exercise the Pool API: results, benchmarks, errors,
    timeouts, callbacks, close(), terminate() and garbage collection."""
    print 'cpuCount() = %d\n' % cpuCount()

    # Create pool
    PROCESSES = 4
    print 'Creating pool with %d processes\n' % PROCESSES
    pool = Pool(PROCESSES)

    # Submit the same task list through four different interfaces
    TASKS = [(mul, (i, 7)) for i in range(10)] + \
            [(plus, (i, 8)) for i in range(10)]

    results = [pool.apply_async(calculate, t) for t in TASKS]
    imap_it = pool.imap(calculatestar, TASKS)
    imap_unordered_it = pool.imap_unordered(calculatestar, TASKS)

    print 'Ordered results using pool.apply_async():'
    for r in results:
        print '\t', r.get()
    print

    print 'Ordered results using pool.imap():'
    for x in imap_it:
        print '\t', x
    print

    print 'Unordered results using pool.imap_unordered():'
    for x in imap_unordered_it:
        print '\t', x
    print

    print 'Ordered results using pool.map() --- will block till complete:'
    for x in pool.map(calculatestar, TASKS):
        print '\t', x
    print

    # Simple benchmarks: builtin map vs pool.map vs pool.imap
    N = 100000
    print 'def pow3(x): return x**3'

    t = time.time()
    A = map(pow3, xrange(N))
    print '\tmap(pow3, xrange(%d)):\n\t\t%s seconds' % \
          (N, time.time() - t)

    t = time.time()
    B = pool.map(pow3, xrange(N))
    print '\tpool.map(pow3, xrange(%d)):\n\t\t%s seconds' % \
          (N, time.time() - t)

    t = time.time()
    C = list(pool.imap(pow3, xrange(N), chunksize=N//8))
    print '\tlist(pool.imap(pow3, xrange(%d), chunksize=%d)):\n\t\t%s' \
          ' seconds' % (N, N//8, time.time() - t)

    # all three strategies must agree on the results
    assert A == B == C, (len(A), len(B), len(C))
    print

    L = [None] * 1000000
    print 'def noop(x): pass'
    print 'L = [None] * 1000000'

    t = time.time()
    A = map(noop, L)
    print '\tmap(noop, L):\n\t\t%s seconds' % \
          (time.time() - t)

    t = time.time()
    B = pool.map(noop, L)
    print '\tpool.map(noop, L):\n\t\t%s seconds' % \
          (time.time() - t)

    t = time.time()
    C = list(pool.imap(noop, L, chunksize=len(L)//8))
    print '\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' % \
          (len(L)//8, time.time() - t)

    assert A == B == C, (len(A), len(B), len(C))
    print

    del A, B, C, L

    # Test error handling: f(5) raises ZeroDivisionError in the worker
    print 'Testing error handling:'

    try:
        print pool.apply(f, (5,))
    except ZeroDivisionError:
        print '\tGot ZeroDivisionError as expected from pool.apply()'
    else:
        raise AssertionError, 'expected ZeroDivisionError'

    try:
        print pool.map(f, range(10))
    except ZeroDivisionError:
        print '\tGot ZeroDivisionError as expected from pool.map()'
    else:
        raise AssertionError, 'expected ZeroDivisionError'

    try:
        print list(pool.imap(f, range(10)))
    except ZeroDivisionError:
        print '\tGot ZeroDivisionError as expected from list(pool.imap())'
    else:
        raise AssertionError, 'expected ZeroDivisionError'

    # iterating manually: only element 5 should raise
    it = pool.imap(f, range(10))
    for i in range(10):
        try:
            x = it.next()
        except ZeroDivisionError:
            if i == 5:
                pass
        except StopIteration:
            break
        else:
            if i == 5:
                raise AssertionError, 'expected ZeroDivisionError'

    assert i == 9
    print '\tGot ZeroDivisionError as expected from IMapIterator.next()'
    print

    # Testing timeouts: poll with a tiny timeout until the result arrives
    print 'Testing ApplyResult.get() with timeout:',
    res = pool.apply_async(calculate, TASKS[0])
    while 1:
        sys.stdout.flush()
        try:
            sys.stdout.write('\n\t%s' % res.get(0.02))
            break
        except TimeoutError:
            sys.stdout.write('.')
    print
    print

    print 'Testing IMapIterator.next() with timeout:',
    it = pool.imap(calculatestar, TASKS)
    while 1:
        sys.stdout.flush()
        try:
            sys.stdout.write('\n\t%s' % it.next(0.02))
        except StopIteration:
            break
        except TimeoutError:
            sys.stdout.write('.')
    print
    print

    # Testing callback: results are appended/extended into A
    print 'Testing callback:'
    A = []
    B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]

    r = pool.apply_async(mul, (7, 8), callback=A.append)
    r.wait()

    r = pool.map_async(pow3, range(10), callback=A.extend)
    r.wait()

    if A == B:
        print '\tcallbacks succeeded\n'
    else:
        print '\t*** callbacks failed\n\t\t%s != %s\n' % (A, B)

    # Check there are no outstanding tasks
    assert not pool._cache, 'cache = %r' % pool._cache

    # Check close(): pending work finishes, then workers die
    print 'Testing close():'
    for worker in pool._pool:
        assert worker.isAlive()
    result = pool.apply_async(time.sleep, [0.5])
    pool.close()
    pool.join()
    assert result.get() is None
    for worker in pool._pool:
        assert not worker.isAlive()
    print '\tclose() succeeded\n'

    # Check terminate(): pending work is abandoned immediately
    print 'Testing terminate():'
    pool = Pool(2)
    ignore = pool.apply(pow3, [2])
    results = [pool.apply_async(time.sleep, [10]) for i in range(10)]
    pool.terminate()
    pool.join()
    for worker in pool._pool:
        assert not worker.isAlive()
    print '\tterminate() succeeded\n'

    # Check garbage collection: dropping all references kills the workers
    print 'Testing garbage collection:'
    pool = Pool(2)
    processes = pool._pool
    ignore = pool.apply(pow3, [2])
    results = [pool.apply_async(time.sleep, [10]) for i in range(10)]
    del results, pool
    time.sleep(0.2)    # give the workers a moment to shut down
    for worker in processes:
        assert not worker.isAlive()
    print '\tgarbage collection succeeded\n'
if __name__ == '__main__':
    freezeSupport()    # needed when frozen into a Windows executable
    test()
#
# A test file for the `processing` package
#
import time, sys, random
from Queue import Empty

# select which flavour of the package to exercise; in plain 'processes'
# mode a LocalManager is used when available
config = globals().get('config', 'processes')

if config == 'processes+server':
    from processing import *
elif config == 'threads':
    from processing.dummy import *
elif config == 'processes':
    from processing import *
    try:
        Manager = LocalManager
    except NameError:
        Manager = None
else:
    raise ValueError, config
#### TEST_NAMESPACE
def namespace_func(running, mutex):
    """Worker: sleep a random while, then decrement the shared counter."""
    random.seed()    # give each process its own random sequence
    time.sleep(random.random()*4)
    mutex.acquire()
    print '\n\t\t\t' + str(currentProcess()) + ' has finished'
    running.value -= 1
    mutex.release()
def test_namespace(manager):
    """Start TASKS workers and poll the shared counter until all finish."""
    TASKS = 10
    running = manager.SharedValue('i', TASKS)
    mutex = manager.Lock()

    for i in range(TASKS):
        Process(target=namespace_func, args=[running, mutex]).start()

    while running.value > 0:
        time.sleep(0.08)
        mutex.acquire()
        print running.value,
        sys.stdout.flush()
        mutex.release()

    print
    print 'No more running processes'
#### TEST_QUEUE
def queue_func(queue):
    """Worker: push 30 squares at random intervals, then a 'STOP' marker."""
    for i in range(30):
        time.sleep(0.5 * random.random())
        queue.put(i*i)
    queue.put('STOP')
def test_queue(manager):
    """Consume from a managed queue with a timeout, reporting misses."""
    q = manager.Queue()
    p = Process(target=queue_func, args=[q])
    p.start()

    o = None
    while o != 'STOP':
        try:
            o = q.get(timeout=0.3)
            print o,
            sys.stdout.flush()
        except Empty:
            print 'TIMEOUT'

    print
#### TEST_CONDITION
def condition_func(cond):
    """Child: acquire the condition, sleep, then notify the waiting parent."""
    cond.acquire()
    print '\t' + str(cond)
    time.sleep(2)
    print '\tchild is notifying'
    print '\t' + str(cond)
    cond.notify()
    cond.release()
def test_condition(manager):
    """Check recursive acquire, wait/notify hand-off and repr changes."""
    cond = manager.Condition()
    p = Process(target=condition_func, args=[cond])
    print cond

    # acquire twice -- the underlying lock is recursive
    cond.acquire()
    print cond
    cond.acquire()
    print cond

    p.start()
    print 'main is waiting'
    cond.wait()    # releases the lock while waiting; child notifies us
    print 'main has woken up'
    print cond

    cond.release()
    print cond
    cond.release()

    p.join()
    print cond
#### TEST_SEMAPHORE
def semaphore_func(sema, mutex, running):
    """Worker gated by `sema`; `running` counts concurrently active tasks."""
    sema.acquire()    # at most 3 workers run concurrently (see test below)

    mutex.acquire()
    running.value += 1
    print running.value, 'tasks are running'
    mutex.release()

    random.seed()
    time.sleep(random.random()*2)

    mutex.acquire()
    running.value -= 1
    print '%s has finished' % currentProcess()
    mutex.release()

    sema.release()
def test_semaphore(manager):
    """Run 10 workers through a semaphore that admits 3 at a time."""
    sema = manager.Semaphore(3)
    mutex = manager.RLock()
    running = manager.SharedValue('i', 0)

    processes = [Process(target=semaphore_func, args=[sema, mutex, running])
                 for i in range(10)]

    for p in processes:
        p.start()

    for p in processes:
        p.join()
#### TEST_JOIN_TIMEOUT
def join_timeout_func():
    """Child: sleep longer than the parent's join() timeout, then exit."""
    print '\tchild sleeping'
    time.sleep(5.5)
    print '\n\tchild terminating'
def test_join_timeout(manager):
    """Repeatedly join() with a timeout until the child actually exits."""
    p = Process(target=join_timeout_func)
    p.start()

    print 'waiting for process to finish'
    while 1:
        p.join(timeout=1)
        if not p.isAlive():
            break
        print '.',
        sys.stdout.flush()
#### TEST_EVENT
def event_func(event):
    """Worker: block on the shared event until the parent sets it."""
    print '\t%r is waiting' % currentProcess()
    event.wait()
    print '\t%r has woken up' % currentProcess()
def test_event(manager):
    """Wake five waiting workers at once by setting a shared event."""
    event = manager.Event()

    processes = [Process(target=event_func, args=[event]) for i in range(5)]

    for p in processes:
        p.start()

    print 'main is sleeping'
    time.sleep(2)

    print 'main is setting event'
    event.set()

    for p in processes:
        p.join()
#### TEST_SHAREDVALUES
def sharedvalues_func(values, structs, arrays,
shared_values, shared_structs, shared_arrays):
for i in range(len(values)):
v = values[i][1]
sv = shared_values[i].value
assert v == sv
for i in range(len(structs)):
s = structs[i][1]
ss = shared_structs[i].value
assert s == ss, (s, ss)
for i in range(len(values)):
a = arrays[i][1]
sa = list(shared_arrays[i][:])
assert a == sa
print 'Tests passed'
def test_sharedvalues(manager):
    """Create shared values/structs/arrays and have a child verify them."""
    if sys.platform == 'cygwin' and hasattr(manager, '_getheap'):
        # heap-backed (local) managers need resizable mmaps
        print >>sys.stderr, 'cygwin does not allow resizing of mmaps'
        return

    values = [
        ('i', 10),
        ('h', -2),
        ('16p', 'hello')
        ]
    structs = [
        ('hd', (10, 0.75)),
        ('10d', tuple(0.375 * i for i in range(10))),
        ('cccc', ('a', 'b', 'c', 'd'))
        ]
    arrays = [
        ('i', range(100)),
        ('d', [0.25 * i for i in range(100)]),
        ('H', range(1000))
        ]

    shared_values = [manager.SharedValue(id, v) for id, v in values]
    shared_structs = [manager.SharedStruct(id, s) for id, s in structs]
    shared_arrays = [manager.SharedArray(id, a) for id, a in arrays]

    p = Process(
        target=sharedvalues_func,
        args=(values, structs, arrays,
              shared_values, shared_structs, shared_arrays)
        )
    p.start()
    p.join()

    # the child asserts internally; a non-zero exit code means failure
    assert p.getExitCode() == 0
####
def test():
    """Run all sub-tests against one manager, then check for leaks."""
    manager = Manager()
    try:
        for func in [ test_namespace, test_queue, test_condition,
                      test_semaphore, test_join_timeout, test_event,
                      test_sharedvalues ]:
            print '\n\t######## %s\n' % func.__name__
            func(manager)

        ignore = activeChildren()    # cleanup any old processes
        info = manager._debug_info()
        if info is not None:
            # leftover refcounts mean a proxy/shared object leaked
            print info
            raise ValueError, 'there should be no positive refcounts left'
    finally:
        manager.shutdown()
if __name__ == '__main__':
    freezeSupport()    # needed when frozen into a Windows executable
    test()
#
# We test the sharing of connection objects and sockets between processes
#
# Will only work if `processing._processing` is available.
#
# pick the real package or the thread-based dummy, per the test driver
config = globals().get('config', 'processes')

if config == 'processes':
    from processing import *
    from processing.connection import Listener, Client, families, \
         connections_are_picklable
    import socket
elif config == 'threads':
    from processing.dummy import *
    from processing.dummy.connection import *
    from processing.dummy.connection import Listener, Client, families, \
         connections_are_picklable
else:
    raise ValueError
#
# Test functions
#
def child_client(address):
    """Child: receive a transferred connection/socket and read from it."""
    conn_to_parent = Client(address)
    conn = conn_to_parent.recv()    # the object transferred by the parent
    print 'child received %s' % conn
    if hasattr(conn, 'poll'):
        # a Connection object
        print 'child receiving message over connection: %r' % conn.recv()
    else:
        # a plain socket
        print 'child receiving message over socket: %r' % conn.recv(100)
def remote_conn_client(address):
    """Pretend remote client: send a message over a Connection."""
    c = Client(address)
    c.send('hello world')
def remote_socket_client(address):
    """Pretend remote client: send a message over a raw socket."""
    s = socket.socket()
    s.connect(address)
    s.sendall('hello world')
def _test(family, use_socket=False):
    """Accept a connection from a 'remote' client and hand the live
    connection object over to a child process."""
    # start child process and set up a connection with it
    child_listener = Listener()
    child_process = Process(target=child_client,
                            args=[child_listener.address])
    child_process.start()
    child_conn = child_listener.accept()
    child_listener.close()

    if use_socket:
        listener = socket.socket()
        listener.bind(('localhost', 0))
        listener.listen(1)
        address = listener.getsockname()
    else:
        listener = Listener(family=family)
        address = listener.address

    # start a pretend remote client
    if use_socket:
        remote_process = Process(target=remote_socket_client, args=[address])
    else:
        remote_process = Process(target=remote_conn_client, args=[address])
    remote_process.start()

    # accept a connection from remote client
    if use_socket:
        conn = listener.accept()[0]
    else:
        conn = listener.accept()

    # send connection object to child process (requires fd transfer)
    print 'parent sending %r' % conn
    child_conn.send(conn)

    # join processes
    remote_process.join()
    child_process.join()
def test():
    """Run _test() for each family, plus once with real sockets."""
    if not connections_are_picklable:
        import sys
        print >>sys.stderr, '''
        Cannot run `test_reduction.test()`.
        You need to have compiled the C extension
        `processing._processing` to transfer file descriptors/handles
        between processes.
        '''
        return

    for fam in families:
        print '\n #### Using family=%s\n' % fam
        _test(fam)

    if 'AF_INET' in families:
        print '\n #### Using real sockets\n'
        _test(None, use_socket=True)
if __name__ == '__main__':
    freezeSupport()    # needed when frozen into a Windows executable
    test()
#
# Simple benchmarks for the processing package
#
import time, sys, processing, threading, Queue, gc

# use the most precise wall-clock timer available on the platform
if sys.platform == 'win32':
    _timer = time.clock
else:
    _timer = time.time

delta = 1    # each benchmark runs until it takes at least this many seconds
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
    """Child: signal readiness via condition `c`, then push `iterations`
    256-byte strings followed by a 'STOP' marker."""
    a = '0' * 256
    c.acquire()
    c.notify()
    c.release()
    for i in xrange(iterations):
        q.put(a)
    q.put('STOP')
def test_queuespeed(Process, q, c):
    """Benchmark queue throughput, doubling the workload until the run
    takes at least `delta` seconds."""
    elapsed = 0
    iterations = 1

    while elapsed < delta:
        iterations *= 2

        p = Process(target=queuespeed_func, args=[q, c, iterations])
        c.acquire()
        p.start()
        c.wait()    # wait until the child is ready to send
        c.release()

        result = None
        t = _timer()
        while result != 'STOP':
            result = q.get()
        elapsed = _timer() - t

        p.join()

    print iterations, 'objects passed through the queue in', \
          elapsed, 'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_PIPESPEED
def pipe_func(c, iterations):
    """Child: wait for 'START', then send `iterations` strings + 'STOP'."""
    a = '0' * 256
    res = c.recv()
    assert res == 'START'
    for i in xrange(iterations):
        c.send(a)
    c.send('STOP')
def test_pipespeed():
    """Benchmark Pipe throughput, doubling the workload until the run
    takes at least `delta` seconds."""
    c, d = processing.Pipe()
    elapsed = 0
    iterations = 1

    while elapsed < delta:
        iterations *= 2

        p = processing.Process(target=pipe_func, args=[d, iterations])
        p.start()

        c.send('START')    # child waits for this before sending
        t = _timer()

        result = None
        while result != 'STOP':
            result = c.recv()
        elapsed = _timer() - t

        p.join()

    print iterations, 'objects passed through connection in', \
          elapsed, 'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_SEQSPEED
def test_seqspeed(seq):
    """Benchmark indexed reads (`seq[5]`) on an arbitrary sequence."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        t = _timer()
        for i in xrange(iterations):
            a = seq[5]
        elapsed = _timer()-t
    print iterations, 'iterations in', elapsed, 'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_LOCK
def test_lockspeed(l):
    """Benchmark uncontended acquire()/release() pairs on lock `l`."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        t = _timer()
        for i in xrange(iterations):
            l.acquire()
            l.release()
        elapsed = _timer()-t
    print iterations, 'iterations in', elapsed, 'seconds'
    print 'average number/sec:', iterations/elapsed
#### TEST_CONDITION
def conditionspeed_func(c, N):
    """Child half of the ping-pong: notify readiness, then alternate
    wait()/notify() with the parent N times."""
    c.acquire()
    c.notify()
    for i in xrange(N):
        c.wait()
        c.notify()
    c.release()
def test_conditionspeed(Process, c):
    """Benchmark condition wait/notify round-trips between two tasks."""
    elapsed = 0
    iterations = 1

    while elapsed < delta:
        iterations *= 2

        c.acquire()
        p = Process(target=conditionspeed_func, args=[c, iterations])
        p.start()
        c.wait()    # child has started and is ready to ping-pong

        t = _timer()
        for i in xrange(iterations):
            c.notify()
            c.wait()
        elapsed = _timer()-t

        c.release()
        p.join()

    # each iteration involves one wait in the parent and one in the child
    print iterations * 2, 'waits in', elapsed, 'seconds'
    print 'average number/sec:', iterations * 2 / elapsed
####
def test():
    """Run all speed benchmarks against threads, processes and managers."""
    import processing
    try:
        from processing.sharedctypes import new_array
    except ImportError:
        new_array = None    # optional: only present with ctypes support

    manager = processing.Manager()
    local_manager = processing.LocalManager()

    gc.disable()    # keep GC pauses out of the timings

    print '\n\t######## testing Queue.Queue\n'
    test_queuespeed(threading.Thread, Queue.Queue(),
                    threading.Condition())
    print '\n\t######## testing processing.Queue\n'
    test_queuespeed(processing.Process, processing.Queue(),
                    processing.Condition())
    print '\n\t######## testing Queue managed by server process\n'
    test_queuespeed(processing.Process, manager.Queue(),
                    manager.Condition())
    print '\n\t######## testing processing.Pipe\n'
    test_pipespeed()

    print

    print '\n\t######## testing list\n'
    test_seqspeed(range(10))
    print '\n\t######## testing LocalManager.SharedArray("i", ...)\n'
    test_seqspeed(local_manager.SharedArray('i', range(10)))
    print '\n\t######## testing list managed by server process\n'
    test_seqspeed(manager.list(range(10)))
    if new_array:
        print '\n\t######## testing sharedctypes.new_array("i", ...)\n'
        test_seqspeed(new_array('i', range(10)))

    print

    print '\n\t######## testing threading.Lock\n'
    test_lockspeed(threading.Lock())
    print '\n\t######## testing threading.RLock\n'
    test_lockspeed(threading.RLock())
    print '\n\t######## testing processing.Lock\n'
    test_lockspeed(processing.Lock())
    print '\n\t######## testing processing.RLock\n'
    test_lockspeed(processing.RLock())
    print '\n\t######## testing lock managed by server process\n'
    test_lockspeed(manager.Lock())
    print '\n\t######## testing rlock managed by server process\n'
    test_lockspeed(manager.RLock())

    print

    print '\n\t######## testing threading.Condition\n'
    test_conditionspeed(threading.Thread, threading.Condition())
    print '\n\t######## testing processing.Condition\n'
    test_conditionspeed(processing.Process, processing.Condition())
    print '\n\t######## testing condition managed by a server process\n'
    test_conditionspeed(processing.Process, manager.Condition())

    gc.enable()
if __name__ == '__main__':
    processing.freezeSupport()    # needed when frozen on Windows
    test()
import time, sys, processing
def foo(n):
    """Child body for the stop() tests: tick until interrupted, then
    react to ProcessExit according to `n` (0=return, 1=sys.exit(1),
    2=re-raise, other=ignore and keep running)."""
    for i in range(25):
        try:
            time.sleep(0.1)
            sys.stderr.write('-')
            sys.stderr.flush()
        except processing.ProcessExit:
            print
            if n == 0:
                print >>sys.stderr, 'exiting normally'
                break
            elif n == 1:
                print >>sys.stderr, 'exiting with exit code 1'
                sys.exit(1)
            elif n == 2:
                print >>sys.stderr, 'reraising ProcessExit'
                raise
            else:
                print >>sys.stderr, 'trying to ignore ProcessExit'
                continue
def run():
    # Exercises Process.stop() with the four `foo` behaviours, then
    # Process.terminate(); the doctest shows the parent's output only.
    '''
    The output shown below only includes output from the parent process:
    >>> run() #doctest: +ELLIPSIS
    <BLANKLINE>
    TEST 1
    <Process(Process-..., stopped)>
    <BLANKLINE>
    TEST 2
    <Process(Process-..., stopped[1])>
    <BLANKLINE>
    TEST 3
    <Process(Process-..., stopped[ProcessExit])>
    <BLANKLINE>
    TEST 4
    <Process(Process-..., stopped)>
    <BLANKLINE>
    TEST 5
    <Process(Process-..., stopped[SIGTERM])>
    '''
    for i in range(4):
        print '\nTEST %d' % (i+1)
        p = processing.Process(target=foo, args=[i])
        p.setStoppable(True)    # allow stop() to raise ProcessExit in child
        p.start()
        time.sleep(1.5)
        p.stop()
        p.join()
        print p

    print '\nTEST 5'
    p = processing.Process(target=foo, args=[None])
    p.start()
    time.sleep(1.5)
    print >>sys.stderr, '\nterminating process'
    p.terminate()    # forceful kill, no ProcessExit delivered
    p.join()
    print p
def test():
    """Run the run() doctest, skipping when Process.stop() is missing."""
    import doctest, sys
    if not hasattr(processing.Process, 'stop'):
        print >>sys.stderr, '''
        Cannot run `test_stop.test()`.
        You need to have compiled the C extension
        `processing._processing` to use `Process.stop()`
        on Windows.
        '''
        return
    res = doctest.testmod(sys.modules[__name__], verbose=False)
    print
    if res[0] != 0:
        print '%s failures out of %s tests.' % res
    else:
        print 'All tests passed.'
if __name__ == '__main__':
    processing.freezeSupport()    # needed when frozen on Windows
    run()
#
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same order as the corresponding tasks were
# put on the input queue. If it is important to get the results back
# in the original order then consider using `Pool.map()` or
# `Pool.imap()` (which will save on the amount of code needed anyway).
#
import time
import random
from processing import currentProcess, Process, freezeSupport
from processing import Queue
#
# Function run by worker processes
#
def worker(input, output):
    """Pull (func, args) tasks off `input` until 'STOP'; push results."""
    for item in iter(input.get, 'STOP'):
        func, args = item
        result = calculate(func, args)
        output.put(result)
#
# Function used to calculate result
#
def calculate(func, args):
    """Apply `func` to `args` and format a report naming the worker."""
    result = func(*args)
    return '%s says that %s%s = %s' % \
           (currentProcess().getName(), func.__name__, args, result)
#
# Functions referenced by tasks
#
def mul(a, b):
    """Multiply after a random pause (simulates a slow task)."""
    pause = 0.5 * random.random()
    time.sleep(pause)
    return a * b
def plus(a, b):
    """Add after a random pause (simulates a slow task)."""
    pause = 0.5 * random.random()
    time.sleep(pause)
    return a + b
#
#
#
def test():
    """Feed two batches of tasks to a pool of worker processes and print
    results as they arrive (order is not guaranteed)."""
    NUMBER_OF_PROCESSES = 4
    TASKS1 = [(mul, (i, 7)) for i in range(20)]
    TASKS2 = [(plus, (i, 8)) for i in range(10)]

    # Create queues
    task_queue = Queue()
    done_queue = Queue()

    # Submit tasks
    task_queue.putmany(TASKS1)

    # Start worker processes
    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=[task_queue, done_queue]).start()

    # Get and print results
    print 'Unordered results:'
    for i in range(len(TASKS1)):
        print '\t', done_queue.get()

    # Add more tasks using `put()` instead of `putmany()`
    for task in TASKS2:
        task_queue.put(task)

    # Get and print some more results
    for i in range(len(TASKS2)):
        print '\t', done_queue.get()

    # Tell child processes to stop (one sentinel per worker)
    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')
if __name__ == '__main__':
    freezeSupport()    # needed when frozen into a Windows executable
    test()