# ===== futures/_base.py =====
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

import collections
import logging
import threading
import time

FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'

# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'

_FUTURE_STATES = [
    PENDING,
    RUNNING,
    CANCELLED,
    CANCELLED_AND_NOTIFIED,
    FINISHED
]

_STATE_TO_DESCRIPTION_MAP = {
    PENDING: "pending",
    RUNNING: "running",
    CANCELLED: "cancelled",
    CANCELLED_AND_NOTIFIED: "cancelled",
    FINISHED: "finished"
}

# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")


class Error(Exception):
    """Base class for all future-related exceptions."""
    pass


class CancelledError(Error):
    """The Future was cancelled."""
    pass


class TimeoutError(Error):
    """The operation exceeded the given deadline."""
    pass


class _Waiter(object):
    """Provides the event that wait() and as_completed() block on."""

    def __init__(self):
        self.event = threading.Event()
        self.finished_futures = []

    def add_result(self, future):
        self.finished_futures.append(future)

    def add_exception(self, future):
        self.finished_futures.append(future)

    def add_cancelled(self, future):
        self.finished_futures.append(future)


class _AsCompletedWaiter(_Waiter):
    """Used by as_completed()."""

    def __init__(self):
        super(_AsCompletedWaiter, self).__init__()
        self.lock = threading.Lock()

    def add_result(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_result(future)
            self.event.set()

    def add_exception(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_exception(future)
            self.event.set()

    def add_cancelled(self, future):
        with self.lock:
            super(_AsCompletedWaiter, self).add_cancelled(future)
            self.event.set()


class _FirstCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_COMPLETED)."""

    def add_result(self, future):
        super().add_result(future)
        self.event.set()

    def add_exception(self, future):
        super().add_exception(future)
        self.event.set()

    def add_cancelled(self, future):
        super().add_cancelled(future)
        self.event.set()


class _AllCompletedWaiter(_Waiter):
    """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""

    def __init__(self, num_pending_calls, stop_on_exception):
        self.num_pending_calls = num_pending_calls
        self.stop_on_exception = stop_on_exception
        self.lock = threading.Lock()
        super().__init__()

    def _decrement_pending_calls(self):
        with self.lock:
            self.num_pending_calls -= 1
            if not self.num_pending_calls:
                self.event.set()

    def add_result(self, future):
        super().add_result(future)
        self._decrement_pending_calls()

    def add_exception(self, future):
        super().add_exception(future)
        if self.stop_on_exception:
            self.event.set()
        else:
            self._decrement_pending_calls()

    def add_cancelled(self, future):
        super().add_cancelled(future)
        self._decrement_pending_calls()


class _AcquireFutures(object):
    """A context manager that does an ordered acquire of Future conditions."""

    def __init__(self, futures):
        self.futures = sorted(futures, key=id)

    def __enter__(self):
        for future in self.futures:
            future._condition.acquire()

    def __exit__(self, *args):
        for future in self.futures:
            future._condition.release()


def _create_and_install_waiters(fs, return_when):
    if return_when == _AS_COMPLETED:
        waiter = _AsCompletedWaiter()
    elif return_when == FIRST_COMPLETED:
        waiter = _FirstCompletedWaiter()
    else:
        pending_count = sum(
                f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)

        if return_when == FIRST_EXCEPTION:
            waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
        elif return_when == ALL_COMPLETED:
            waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
        else:
            raise ValueError("Invalid return condition: %r" % return_when)

    for f in fs:
        f._waiters.append(waiter)

    return waiter


def _yield_finished_futures(fs, waiter, ref_collect):
    """
    Iterate on the list *fs*, yielding finished futures one by one in
    reverse order.
    Before yielding a future, *waiter* is removed from its waiters
    and the future is removed from each set in the collection of sets
    *ref_collect*.

    The aim of this function is to avoid keeping stale references after
    the future is yielded and before the iterator resumes.
    """
    while fs:
        f = fs[-1]
        for futures_set in ref_collect:
            futures_set.remove(f)
        with f._condition:
            f._waiters.remove(waiter)
        del f
        # Careful not to keep a reference to the popped value
        yield fs.pop()


def as_completed(fs, timeout=None):
    """An iterator over the given futures that yields each as it completes.

    Args:
        fs: The sequence of Futures (possibly created by different Executors)
            to iterate over.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.

    Returns:
        An iterator that yields the given Futures as they complete (finished or
        cancelled). If any given Futures are duplicated, they will be returned
        once.

    Raises:
        TimeoutError: If the entire result iterator could not be generated
            before the given timeout.
    """
    if timeout is not None:
        end_time = timeout + time.monotonic()

    fs = set(fs)
    total_futures = len(fs)
    with _AcquireFutures(fs):
        finished = set(
                f for f in fs
                if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        pending = fs - finished
        waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
    finished = list(finished)
    try:
        yield from _yield_finished_futures(finished, waiter,
                                           ref_collect=(fs,))

        while pending:
            if timeout is None:
                wait_timeout = None
            else:
                wait_timeout = end_time - time.monotonic()
                if wait_timeout < 0:
                    raise TimeoutError(
                            '%d (of %d) futures unfinished' % (
                            len(pending), total_futures))

            waiter.event.wait(wait_timeout)

            with waiter.lock:
                finished = waiter.finished_futures
                waiter.finished_futures = []
                waiter.event.clear()

            # reverse to keep finishing order
            finished.reverse()
            yield from _yield_finished_futures(finished, waiter,
                                               ref_collect=(fs, pending))

    finally:
        # Remove waiter from unfinished futures
        for f in fs:
            with f._condition:
                f._waiters.remove(waiter)


DoneAndNotDoneFutures = collections.namedtuple(
        'DoneAndNotDoneFutures', 'done not_done')


def wait(fs, timeout=None, return_when=ALL_COMPLETED):
    """Wait for the futures in the given sequence to complete.

    Args:
        fs: The sequence of Futures (possibly created by different Executors)
            to wait upon.
        timeout: The maximum number of seconds to wait. If None, then there
            is no limit on the wait time.
        return_when: Indicates when this function should return. The options
            are:

            FIRST_COMPLETED - Return when any future finishes or is
                              cancelled.
            FIRST_EXCEPTION - Return when any future finishes by raising an
                              exception. If no future raises an exception
                              then it is equivalent to ALL_COMPLETED.
            ALL_COMPLETED -   Return when all futures finish or are cancelled.

    Returns:
        A named 2-tuple of sets. The first set, named 'done', contains the
        futures that completed (is finished or cancelled) before the wait
        completed. The second set, named 'not_done', contains uncompleted
        futures.
    """
    with _AcquireFutures(fs):
        done = set(f for f in fs
                   if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
        not_done = set(fs) - done

        if (return_when == FIRST_COMPLETED) and done:
            return DoneAndNotDoneFutures(done, not_done)
        elif (return_when == FIRST_EXCEPTION) and done:
            if any(f for f in done
                   if not f.cancelled() and f.exception() is not None):
                return DoneAndNotDoneFutures(done, not_done)

        if len(done) == len(fs):
            return DoneAndNotDoneFutures(done, not_done)

        waiter = _create_and_install_waiters(fs, return_when)

    waiter.event.wait(timeout)
    for f in fs:
        with f._condition:
            f._waiters.remove(waiter)

    done.update(waiter.finished_futures)
    return DoneAndNotDoneFutures(done, set(fs) - done)
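
# Illustrative sketch (not part of the original module API): how wait() and
# as_completed() are typically combined with an Executor. The `executor` and
# `task` arguments are placeholders for any concrete Executor and callable.
def _example_wait_then_collect(executor, task, n=10):
    # Submit n independent calls; each submit() returns a Future immediately.
    futures = [executor.submit(task, i) for i in range(n)]
    # Block until at least one future finishes or is cancelled.
    done, not_done = wait(futures, return_when=FIRST_COMPLETED)
    LOGGER.debug('%d done, %d still pending', len(done), len(not_done))
    # Consume every future in completion order; result() re-raises any
    # exception the call raised while executing.
    return [f.result() for f in as_completed(futures)]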

class Future(object):
    """Represents the result of an asynchronous computation."""

    def __init__(self):
        """Initializes the future. Should not be called by clients."""
        self._condition = threading.Condition()
        self._state = PENDING
        self._result = None
        self._exception = None
        self._waiters = []
        self._done_callbacks = []

    def _invoke_callbacks(self):
        for callback in self._done_callbacks:
            try:
                callback(self)
            except Exception:
                LOGGER.exception('exception calling callback for %r', self)

    def __repr__(self):
        with self._condition:
            if self._state == FINISHED:
                if self._exception:
                    return '<%s at %#x state=%s raised %s>' % (
                        self.__class__.__name__,
                        id(self),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._exception.__class__.__name__)
                else:
                    return '<%s at %#x state=%s returned %s>' % (
                        self.__class__.__name__,
                        id(self),
                        _STATE_TO_DESCRIPTION_MAP[self._state],
                        self._result.__class__.__name__)
            return '<%s at %#x state=%s>' % (
                    self.__class__.__name__,
                    id(self),
                    _STATE_TO_DESCRIPTION_MAP[self._state])

    def cancel(self):
        """Cancel the future if possible.

        Returns True if the future was cancelled, False otherwise. A future
        cannot be cancelled if it is running or has already completed.
        """
        with self._condition:
            if self._state in [RUNNING, FINISHED]:
                return False

            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                return True

            self._state = CANCELLED
            self._condition.notify_all()

        self._invoke_callbacks()
        return True

    def cancelled(self):
        """Return True if the future was cancelled."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]

    def running(self):
        """Return True if the future is currently executing."""
        with self._condition:
            return self._state == RUNNING

    def done(self):
        """Return True if the future was cancelled or finished executing."""
        with self._condition:
            return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]

    def __get_result(self):
        if self._exception:
            raise self._exception
        else:
            return self._result

    def add_done_callback(self, fn):
        """Attaches a callable that will be called when the future finishes.

        Args:
            fn: A callable that will be called with this future as its only
                argument when the future completes or is cancelled. The
                callable will always be called by a thread in the same process
                in which it was added. If the future has already completed or
                been cancelled then the callable will be called immediately.
                These callables are called in the order that they were added.
        """
        with self._condition:
            if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
                self._done_callbacks.append(fn)
                return
        fn(self)

    def result(self, timeout=None):
        """Return the result of the call that the future represents.

        Args:
            timeout: The number of seconds to wait for the result if the future
                isn't done. If None, then there is no limit on the wait time.

        Returns:
            The result of the call that the future represents.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the
                given timeout.
            Exception: If the call raised then that exception will be raised.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()

            self._condition.wait(timeout)

            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self.__get_result()
            else:
                raise TimeoutError()

    def exception(self, timeout=None):
        """Return the exception raised by the call that the future represents.

        Args:
            timeout: The number of seconds to wait for the exception if the
                future isn't done. If None, then there is no limit on the wait
                time.

        Returns:
            The exception raised by the call that the future represents or None
            if the call completed without raising.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the
                given timeout.
        """
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception

            self._condition.wait(timeout)

            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()
            elif self._state == FINISHED:
                return self._exception
            else:
                raise TimeoutError()

    # The following methods should only be used by Executors and in tests.
    def set_running_or_notify_cancel(self):
        """Mark the future as running or process any cancel notifications.

        Should only be used by Executor implementations and unit tests.

        If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through calls
        to as_completed() or wait()) are notified and False is returned.

        If the future was not cancelled then it is put in the running state
        (future calls to running() will return True) and True is returned.

        This method should be called by Executor implementations before
        executing the work associated with this future. If this method returns
        False then the work should not be executed.

        Returns:
            False if the Future was cancelled, True otherwise.

        Raises:
            RuntimeError: if this method was already called or if set_result()
                or set_exception() was called.
        """
        with self._condition:
            if self._state == CANCELLED:
                self._state = CANCELLED_AND_NOTIFIED
                for waiter in self._waiters:
                    waiter.add_cancelled(self)
                # self._condition.notify_all() is not necessary because
                # self.cancel() triggers a notification.
                return False
            elif self._state == PENDING:
                self._state = RUNNING
                return True
            else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
                raise RuntimeError('Future in unexpected state')

    def set_result(self, result):
        """Sets the return value of work associated with the future.

        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._result = result
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_result(self)
            self._condition.notify_all()
        self._invoke_callbacks()

    def set_exception(self, exception):
        """Sets the result of the future as being the given exception.

        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._exception = exception
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_exception(self)
            self._condition.notify_all()
        self._invoke_callbacks()
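
# Illustrative sketch (not part of the original module API): the protocol an
# Executor implementation is expected to follow when running the work behind a
# Future, as described by the docstrings above. `fn`, `args` and `kwargs`
# stand for the submitted callable and its arguments.
def _example_run_work_item(future, fn, *args, **kwargs):
    # Respect a cancellation that happened while the call was still queued.
    if not future.set_running_or_notify_cancel():
        return
    try:
        result = fn(*args, **kwargs)
    except BaseException as exc:
        # Store the exception; Future.result() will re-raise it to the caller.
        future.set_exception(exc)
    else:
        future.set_result(result)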

class Executor(object):
    """This is an abstract base class for concrete asynchronous executors."""

    def submit(self, fn, *args, **kwargs):
        """Submits a callable to be executed with the given arguments.

        Schedules the callable to be executed as fn(*args, **kwargs) and
        returns a Future instance representing the execution of the callable.

        Returns:
            A Future representing the given call.
        """
        raise NotImplementedError()

    def map(self, fn, *iterables, timeout=None, chunksize=1):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: The size of the chunks the iterable will be broken into
                before being passed to a child process. This argument is only
                used by ProcessPoolExecutor; it is ignored by
                ThreadPoolExecutor.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        if timeout is not None:
            end_time = timeout + time.monotonic()

        fs = [self.submit(fn, *args) for args in zip(*iterables)]

        # Yield must be hidden in closure so that the futures are submitted
        # before the first iterator value is required.
        def result_iterator():
            try:
                # reverse to keep finishing order
                fs.reverse()
                while fs:
                    # Careful not to keep a reference to the popped future
                    if timeout is None:
                        yield fs.pop().result()
                    else:
                        yield fs.pop().result(end_time - time.monotonic())
            finally:
                for future in fs:
                    future.cancel()
        return result_iterator()

    def shutdown(self, wait=True):
        """Clean-up the resources associated with the Executor.

        It is safe to call this method several times. Otherwise, no other
        methods can be called after this one.

        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by the
                executor have been reclaimed.
        """
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown(wait=True)
        return False
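
# Illustrative sketch (not part of the original module): typical use of the
# Executor interface through a concrete implementation. ThreadPoolExecutor is
# assumed to come from the sibling concurrent.futures.thread module; the
# worker function below is a stand-in.
if __name__ == '__main__':
    from concurrent.futures import ThreadPoolExecutor

    def _double(x):
        return 2 * x

    # The context manager calls shutdown(wait=True) on exit, and map() yields
    # results in argument order even though calls may finish out of order.
    with ThreadPoolExecutor(max_workers=4) as pool:
        print(list(pool.map(_double, range(8), timeout=30)))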

# ===== futures/process.py =====
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ProcessPoolExecutor.

The following diagram and text describe the data-flow through the system:

|======================= In-process =====================|== Out-of-process ==|

+----------+     +----------+       +--------+     +-----------+    +---------+
|          |  => | Work Ids |    => |        |  => | Call Q    | => |         |
|          |     +----------+       |        |     +-----------+    |         |
|          |     | ...      |       |        |     | ...       |    |         |
|          |     | 6        |       |        |     | 5, call() |    |         |
|          |     | 7        |       |        |     | ...       |    |         |
| Process  |     | ...      |       | Local  |     +-----------+    | Process |
|  Pool    |     +----------+       | Worker |                      |  #1..n  |
| Executor |                        | Thread |                      |         |
|          |     +----------- +     |        |     +-----------+    |         |
|          | <=> | Work Items | <=> |        | <=  | Result Q  | <= |         |
|          |     +------------+     |        |     +-----------+    |         |
|          |     | 6: call() |      |        |     | ...       |    |         |
|          |     |    future |      |        |     | 4, result |    |         |
|          |     | ...       |      |        |     | 3, except |    |         |
+----------+     +------------+     +--------+     +-----------+    +---------+

Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue

Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
  WorkItem from the "Work Items" dict: if the work item has been cancelled then
  it is simply removed from the dict, otherwise it is repackaged as a
  _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
  until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
  calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
  "Work Items" dict and deletes the dict entry

Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
  _ResultItems in "Result Q"
"""

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

import atexit
import os
from concurrent.futures import _base
import queue
from queue import Full
import multiprocessing
from multiprocessing import SimpleQueue
from multiprocessing.connection import wait
import threading
import weakref
from functools import partial
import itertools
import traceback

# Workers are created as daemon threads and processes. This is done to allow
# the interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.

_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False


def _python_exit():
    global _shutdown
    _shutdown = True
    items = list(_threads_queues.items())
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join()

# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1


# Hack to embed stringification of remote traceback in local traceback

class _RemoteTraceback(Exception):
    def __init__(self, tb):
        self.tb = tb

    def __str__(self):
        return self.tb


class _ExceptionWithTraceback:
    def __init__(self, exc, tb):
        tb = traceback.format_exception(type(exc), exc, tb)
        tb = ''.join(tb)
        self.exc = exc
        self.tb = '\n"""\n%s"""' % tb

    def __reduce__(self):
        return _rebuild_exc, (self.exc, self.tb)


def _rebuild_exc(exc, tb):
    exc.__cause__ = _RemoteTraceback(tb)
    return exc


class _WorkItem(object):
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs


class _ResultItem(object):
    def __init__(self, work_id, exception=None, result=None):
        self.work_id = work_id
        self.exception = exception
        self.result = result


class _CallItem(object):
    def __init__(self, work_id, fn, args, kwargs):
        self.work_id = work_id
        self.fn = fn
        self.args = args
        self.kwargs = kwargs


def _get_chunks(*iterables, chunksize):
    """ Iterates over zip()ed iterables in chunks. """
    it = zip(*iterables)
    while True:
        chunk = tuple(itertools.islice(it, chunksize))
        if not chunk:
            return
        yield chunk


def _process_chunk(fn, chunk):
    """ Processes a chunk of an iterable passed to map.

    Runs the function passed to map() on a chunk of the
    iterable passed to map.

    This function is run in a separate process.
    """
    return [fn(*args) for args in chunk]


def _process_worker(call_queue, result_queue):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be
            written to by the worker.
        shutdown: A multiprocessing.Event that will be set as a signal to the
            worker that it should exit when call_queue is empty.
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Wake up queue management thread
            result_queue.put(os.getpid())
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException as e:
            exc = _ExceptionWithTraceback(e, e.__traceback__)
            result_queue.put(_ResultItem(call_item.work_id, exception=exc))
        else:
            result_queue.put(_ResultItem(call_item.work_id,
                                         result=r))


def _add_call_item_to_queue(pending_work_items,
                            work_ids,
                            call_queue):
    """Fills call_queue with _WorkItems from pending_work_items.

    This function never blocks.

    Args:
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
            are consumed and the corresponding _WorkItems from
            pending_work_items are transformed into _CallItems and put in
            call_queue.
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems.
    """
    while True:
        if call_queue.full():
            return
        try:
            work_id = work_ids.get(block=False)
        except queue.Empty:
            return
        else:
            work_item = pending_work_items[work_id]

            if work_item.future.set_running_or_notify_cancel():
                call_queue.put(_CallItem(work_id,
                                         work_item.fn,
                                         work_item.args,
                                         work_item.kwargs),
                               block=True)
            else:
                del pending_work_items[work_id]
                continue


def _queue_management_worker(executor_reference,
                             processes,
                             pending_work_items,
                             work_ids_queue,
                             call_queue,
                             result_queue):
    """Manages the communication between this process and the worker processes.

    This function is run in a local thread.

    Args:
        executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
            this thread. Used to determine if the ProcessPoolExecutor has been
            garbage collected and that this function can exit.
        processes: A list of the multiprocessing.Process instances used as
            workers.
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems for processing by the process workers.
        result_queue: A multiprocessing.Queue of _ResultItems generated by the
            process workers.
    """
    executor = None

    def shutting_down():
        return _shutdown or executor is None or executor._shutdown_thread

    def shutdown_worker():
        # This is an upper bound
        nb_children_alive = sum(p.is_alive() for p in processes.values())
        for i in range(0, nb_children_alive):
            call_queue.put_nowait(None)
        # Release the queue's resources as soon as possible.
        call_queue.close()
        # If .join() is not called on the created processes then
        # some multiprocessing.Queue methods may deadlock on Mac OS X.
        for p in processes.values():
            p.join()

    reader = result_queue._reader

    while True:
        _add_call_item_to_queue(pending_work_items,
                                work_ids_queue,
                                call_queue)

        sentinels = [p.sentinel for p in processes.values()]
        assert sentinels
        ready = wait([reader] + sentinels)
        if reader in ready:
            result_item = reader.recv()
        else:
            # Mark the process pool broken so that submits fail right now.
            executor = executor_reference()
            if executor is not None:
                executor._broken = True
                executor._shutdown_thread = True
                executor = None
            # All futures in flight must be marked failed
            for work_id, work_item in pending_work_items.items():
                work_item.future.set_exception(
                    BrokenProcessPool(
                        "A process in the process pool was "
                        "terminated abruptly while the future was "
                        "running or pending."
                    ))
                # Delete references to object. See issue16284
                del work_item
            pending_work_items.clear()
            # Terminate remaining workers forcibly: the queues or their
            # locks may be in a dirty state and block forever.
            for p in processes.values():
                p.terminate()
            shutdown_worker()
            return
        if isinstance(result_item, int):
            # Clean shutdown of a worker using its PID
            # (avoids marking the executor broken)
            assert shutting_down()
            p = processes.pop(result_item)
            p.join()
            if not processes:
                shutdown_worker()
                return
        elif result_item is not None:
            work_item = pending_work_items.pop(result_item.work_id, None)
            # work_item can be None if another process terminated (see above)
            if work_item is not None:
                if result_item.exception:
                    work_item.future.set_exception(result_item.exception)
                else:
                    work_item.future.set_result(result_item.result)
                # Delete references to object. See issue16284
                del work_item
        # Check whether we should start shutting down.
        executor = executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this worker has been collected OR
        #   - The executor that owns this worker has been shutdown.
        if shutting_down():
            try:
                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not pending_work_items:
                    shutdown_worker()
                    return
            except Full:
                # This is not a problem: we will eventually be woken up (in
                # result_queue.get()) and be able to send a sentinel again.
                pass
        executor = None


_system_limits_checked = False
_system_limited = None


def _check_system_limits():
    global _system_limits_checked, _system_limited
    if _system_limits_checked:
        if _system_limited:
            raise NotImplementedError(_system_limited)
    _system_limits_checked = True
    try:
        nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available
        return
    if nsems_max == -1:
        # indetermined limit, assume that limit is determined
        # by available memory only
        return
    if nsems_max >= 256:
        # minimum number of semaphores available
        # according to POSIX
        return
    _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
    raise NotImplementedError(_system_limited)


def _chain_from_iterable_of_lists(iterable):
    """
    Specialized implementation of itertools.chain.from_iterable.
    Each item in *iterable* should be a list.  This function is
    careful not to keep references to yielded objects.
    """
    for element in iterable:
        element.reverse()
        while element:
            yield element.pop()


class BrokenProcessPool(RuntimeError):
    """
    Raised when a process in a ProcessPoolExecutor terminated abruptly
    while a future was in the running state.
    """


class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            if max_workers <= 0:
                raise ValueError("max_workers must be greater than 0")

            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def map(self, fn, *iterables, timeout=None, chunksize=1):
        """Returns an iterator equivalent to map(fn, iter).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.
            chunksize: If greater than one, the iterables will be chopped into
                chunks of size chunksize and submitted to the process pool.
                If set to one, the items in the list will be sent one at a
                time.

        Returns:
            An iterator equivalent to: map(func, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        if chunksize < 1:
            raise ValueError("chunksize must be >= 1.")

        results = super().map(partial(_process_chunk, fn),
                              _get_chunks(*iterables, chunksize=chunksize),
                              timeout=timeout)
        return _chain_from_iterable_of_lists(results)

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__


atexit.register(_python_exit)
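
# Illustrative sketch (not part of the original module): typical use of
# ProcessPoolExecutor. The worker function is a stand-in; it must be defined
# at module level so it can be pickled and sent to the worker processes, and
# the __main__ guard is needed wherever the "spawn" start method is used.
def _example_square(x):
    return x * x

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=2) as pool:
        # chunksize batches the inputs so each _CallItem carries several
        # calls, reducing queue traffic for cheap functions.
        print(list(pool.map(_example_square, range(100), chunksize=10)))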
Nr)rrOrrrshutdownRs zExecutor.shutdowncCs|S)Nr)rrrrr7_szExecutor.__enter__cCs|jdddS)NT)rOF)rx)rexc_typeZexc_valZexc_tbrrrr:bs zExecutor.__exit__)T) rrrrrqrwrxr7r:rrrrrns  , rn)N)$ __author__ collectionsZloggingrrKrrrrrrrr r Z_FUTURE_STATESrbZ getLoggerr_r^rrrobjectrr#r*r+r0rErJrT namedtuplerUrOrZrnrrrrsR  >1dfutures/__pycache__/__init__.cpython-36.opt-2.pyc000064400000001062151026775010015601 0ustar003 \ @sLdZddlmZmZmZmZmZmZmZm Z m Z ddl m Z ddl mZdS)z"Brian Quinlan (brian@sweetapp.com)) FIRST_COMPLETEDFIRST_EXCEPTION ALL_COMPLETEDCancelledError TimeoutErrorFutureExecutorwait as_completed)ProcessPoolExecutor)ThreadPoolExecutorN) __author__Zconcurrent.futures._baserrrrrrrr r Zconcurrent.futures.processr Zconcurrent.futures.threadr rr3/usr/lib64/python3.6/concurrent/futures/__init__.pys, futures/__pycache__/process.cpython-36.opt-2.pyc000064400000022702151026775010015524 0ustar003 \ P@s^dZddlZddlZddlmZddlZddlmZddlZddlmZddl m Z ddl Z ddl Z ddl mZddlZddlZe jZdad d Zd ZGd d d eZGdddZddZGdddeZGdddeZGdddeZddZddZddZ ddZ!d d!Z"da#da$d"d#Z%d$d%Z&Gd&d'd'e'Z(Gd(d)d)ej)Z*ej+edS)*z"Brian Quinlan (brian@sweetapp.com)N)_base)Full) SimpleQueue)wait)partialFcCsJdattj}x|D]\}}|jdqWx|D]\}}|jq2WdS)NT) _shutdownlist_threads_queuesitemsputjoin)r tqr2/usr/lib64/python3.6/concurrent/futures/process.py _python_exitOs  rc@seZdZddZddZdS)_RemoteTracebackcCs ||_dS)N)tb)selfrrrr__init__asz_RemoteTraceback.__init__cCs|jS)N)r)rrrr__str__csz_RemoteTraceback.__str__N)__name__ __module__ __qualname__rrrrrrr`src@seZdZddZddZdS)_ExceptionWithTracebackcCs0tjt|||}dj|}||_d||_dS)Nz """ %s""") tracebackformat_exceptiontyper excr)rr rrrrrgs z _ExceptionWithTraceback.__init__cCst|j|jffS)N) _rebuild_excr r)rrrr __reduce__lsz"_ExceptionWithTraceback.__reduce__N)rrrrr"rrrrrfsrcCst||_|S)N)r __cause__)r rrrrr!os r!c@seZdZddZdS) _WorkItemcCs||_||_||_||_dS)N)futurefnargskwargs)rr%r&r'r(rrrrtsz_WorkItem.__init__N)rrrrrrrrr$ssr$c@seZdZdddZdS) _ResultItemNcCs||_||_||_dS)N)work_id exceptionresult)rr*r+r,rrrr{sz_ResultItem.__init__)NN)rrrrrrrrr)zsr)c@seZdZddZdS) _CallItemcCs||_||_||_||_dS)N)r*r&r'r()rr*r&r'r(rrrrsz_CallItem.__init__N)rrrrrrrrr-sr-cgs0t|}x"ttj||}|s"dS|Vq WdS)N)ziptuple itertoolsislice) chunksize iterablesitchunkrrr _get_chunkss r6csfdd|DS)Ncsg|] }|qSrr).0r')r&rr sz"_process_chunk..r)r&r5r)r&r_process_chunks r9cCsx|jdd}|dkr(|jtjdSy|j|j|j}WnBtk r~}z&t||j }|jt |j |dWYdd}~XqX|jt |j |dqWdS)NT)block)r+)r,) getr osgetpidr&r'r( BaseExceptionr __traceback__r)r*) call_queue result_queueZ call_itemrer rrr_process_workers   & rDcCsxxr|jrdSy|jdd}Wntjk r4dSX||}|jjrh|jt||j|j |j ddq||=qqWdS)NF)r:T) Zfullr;queueZEmptyr%Zset_running_or_notify_cancelr r-r&r'r()pending_work_itemsZwork_idsr@r* work_itemrrr_add_call_item_to_queues   rHc sdfdd}fdd}|j}xlt||ddjD} t|g| } || krf|j} nr|dk rd_d_dx&|jD]\} } | jj t d~ qW|j xjD] }|j qW|dSt | trj| }|jsR|dSnJ| dk rR|j| jd} | dk rR| jrB| jj | jn| jj| j~ ||ry|sr|dSWntk rYnXdq(WdS) NcstpdkpjS)N)r_shutdown_threadr)executorrr shutting_downsz/_queue_management_worker..shutting_downcsZtddjD}xtd|D]}jdq"WjxjD] }|jqFWdS)Ncss|]}|jVqdS)N)Zis_alive)r7prrr szD_queue_management_worker..shutdown_worker..r)sumvaluesrangeZ put_nowaitcloser )Znb_children_aliveirL)r@ processesrrshutdown_workers z1_queue_management_worker..shutdown_workercSsg|] }|jqSr)sentinel)r7rLrrrr8 sz,_queue_management_worker..Tz^A process in the process pool was terminated abruptly while the future was running or pending.)Z_readerrHrOrZrecv_brokenrIr r%Z set_exceptionBrokenProcessPoolclearZ terminate isinstanceintpopr r*r+Z 
set_resultr,r)Zexecutor_referencerSrFZwork_ids_queuer@rArKrTreaderZ sentinelsZreadyZ result_itemr*rGrLr)r@rJrSr_queue_management_workersb        r]c Cshtrtrttdaytjd}Wnttfk r:dSX|dkrHdS|dkrTdSd|attdS)NTSC_SEM_NSEMS_MAXrz@system provides too few semaphores (%d available, 256 necessary))_system_limits_checked_system_limitedNotImplementedErrorr<sysconfAttributeError ValueError)Z nsems_maxrrr_check_system_limitsQsrgccs.x(|D] }|jx|r$|jVqWqWdS)N)reverser[)iterableelementrrr_chain_from_iterable_of_listshs rkc@s eZdZdS)rWN)rrrrrrrrWtsrWcsheZdZdddZddZddZdd Zejjj e_ dd d fd d Z dddZ ejj j e _ Z S)ProcessPoolExecutorNcCst|dkrtjpd|_n|dkr.td||_tj|jt|_d|j_ t |_ t j|_ d|_i|_d|_tj|_d|_d|_i|_dS)Nrrz"max_workers must be greater than 0TF)rgr< cpu_count _max_workersrfmultiprocessingZQueueEXTRA_QUEUED_CALLS _call_queueZ _ignore_epiper _result_queuerE _work_ids_queue_management_thread _processesrI threadingZLock_shutdown_lockrV _queue_count_pending_work_items)rZ max_workersrrrr|s$   zProcessPoolExecutor.__init__cCsp|jfdd}|jdkrl|jtjttj|||j|j |j |j |jfd|_d|j_ |jj |jt|j<dS)NcSs|jddS)N)r )_rrrr weakref_cbszFProcessPoolExecutor._start_queue_management_thread..weakref_cb)targetr'T)rrrt_adjust_process_countrvZThreadr]weakrefrefruryrsrqZdaemonstartr )rr{rrr_start_queue_management_threads   z2ProcessPoolExecutor._start_queue_management_threadcCsJxDtt|j|jD].}tjt|j|jfd}|j ||j|j <qWdS)N)r|r') rPlenrurnroZProcessrDrqrrrpid)rrzrLrrrr}s z)ProcessPoolExecutor._adjust_process_countc Os|jt|jrtd|jr$tdtj}t||||}||j|j <|j j |j |j d7_ |j j d|j |SQRXdS)NzKA child process terminated abruptly, the process pool is not usable anymorez*cannot schedule new futures after shutdownr)rwrVrWrI RuntimeErrorrZFuturer$ryrxrsr rrr)rr&r'r(fwrrrsubmits  zProcessPoolExecutor.submitr)timeoutr2cs:|dkrtdtjtt|t|d|i|d}t|S)Nrzchunksize must be >= 1.r2)r)rfsupermaprr9r6rk)rr&rr2r3Zresults) __class__rrrs  zProcessPoolExecutor.mapTc CsT|j d|_WdQRX|jr8|jjd|r8|jjd|_d|_d|_d|_dS)NT)rwrIrtrrr r rqru)rrrrrshutdowns  zProcessPoolExecutor.shutdown)N)T) rrrrrr}rrExecutor__doc__rr __classcell__rr)rrrl{s (   rl), __author__atexitr<Zconcurrent.futuresrrErrorZmultiprocessing.connectionrrvr~ functoolsrr0rWeakKeyDictionaryr rrrp Exceptionrrr!objectr$r)r-r6r9rDrHr]rarbrgrkrrWrrlregisterrrrr.sH         %s  futures/__pycache__/process.cpython-36.pyc000064400000036727151026775010014600 0ustar003 \ P@sbdZdZddlZddlZddlmZddlZddlmZddlZddlm Z ddl m Z ddl Z ddl Z ddlmZddlZddlZe jZd ad d Zd ZGd ddeZGdddZddZGdddeZGdddeZGdddeZddZddZ ddZ!dd Z"d!d"Z#d a$da%d#d$Z&d%d&Z'Gd'd(d(e(Z)Gd)d*d*ej*Z+ej,edS)+a* Implements ProcessPoolExecutor. The follow diagram and text describe the data-flow through the system: |======================= In-process =====================|== Out-of-process ==| +----------+ +----------+ +--------+ +-----------+ +---------+ | | => | Work Ids | => | | => | Call Q | => | | | | +----------+ | | +-----------+ | | | | | ... | | | | ... | | | | | | 6 | | | | 5, call() | | | | | | 7 | | | | ... | | | | Process | | ... | | Local | +-----------+ | Process | | Pool | +----------+ | Worker | | #1..n | | Executor | | Thread | | | | | +----------- + | | +-----------+ | | | | <=> | Work Items | <=> | | <= | Result Q | <= | | | | +------------+ | | +-----------+ | | | | | 6: call() | | | | ... | | | | | | future | | | | 4, result | | | | | | ... 
| | | | 3, except | | | +----------+ +------------+ +--------+ +-----------+ +---------+ Executor.submit() called: - creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict - adds the id of the _WorkItem to the "Work Ids" queue Local worker thread: - reads work ids from the "Work Ids" queue and looks up the corresponding WorkItem from the "Work Items" dict: if the work item has been cancelled then it is simply removed from the dict, otherwise it is repackaged as a _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). - reads _ResultItems from "Result Q", updates the future stored in the "Work Items" dict and deletes the dict entry Process #1..n: - reads _CallItems from "Call Q", executes the calls, and puts the resulting _ResultItems in "Result Q" z"Brian Quinlan (brian@sweetapp.com)N)_base)Full) SimpleQueue)wait)partialFcCsJdattj}x|D]\}}|jdqWx|D]\}}|jq2WdS)NT) _shutdownlist_threads_queuesitemsputjoin)r tqr2/usr/lib64/python3.6/concurrent/futures/process.py _python_exitOs  rc@seZdZddZddZdS)_RemoteTracebackcCs ||_dS)N)tb)selfrrrr__init__asz_RemoteTraceback.__init__cCs|jS)N)r)rrrr__str__csz_RemoteTraceback.__str__N)__name__ __module__ __qualname__rrrrrrr`src@seZdZddZddZdS)_ExceptionWithTracebackcCs0tjt|||}dj|}||_d||_dS)Nz """ %s""") tracebackformat_exceptiontyper excr)rr rrrrrgs z _ExceptionWithTraceback.__init__cCst|j|jffS)N) _rebuild_excr r)rrrr __reduce__lsz"_ExceptionWithTraceback.__reduce__N)rrrrr"rrrrrfsrcCst||_|S)N)r __cause__)r rrrrr!os r!c@seZdZddZdS) _WorkItemcCs||_||_||_||_dS)N)futurefnargskwargs)rr%r&r'r(rrrrtsz_WorkItem.__init__N)rrrrrrrrr$ssr$c@seZdZdddZdS) _ResultItemNcCs||_||_||_dS)N)work_id exceptionresult)rr*r+r,rrrr{sz_ResultItem.__init__)NN)rrrrrrrrr)zsr)c@seZdZddZdS) _CallItemcCs||_||_||_||_dS)N)r*r&r'r()rr*r&r'r(rrrrsz_CallItem.__init__N)rrrrrrrrr-sr-cgs0t|}x"ttj||}|s"dS|Vq WdS)z, Iterates over zip()ed iterables in chunks. N)ziptuple itertoolsislice) chunksize iterablesitchunkrrr _get_chunkss r6csfdd|DS)z Processes a chunk of an iterable passed to map. Runs the function passed to map() on a chunk of the iterable passed to map. This function is run in a separate process. csg|] }|qSrr).0r')r&rr sz"_process_chunk..r)r&r5r)r&r_process_chunks r9cCsx|jdd}|dkr(|jtjdSy|j|j|j}WnBtk r~}z&t||j }|jt |j |dWYdd}~XqX|jt |j |dqWdS)aEvaluates calls from call_queue and places the results in result_queue. This worker is run in a separate process. Args: call_queue: A multiprocessing.Queue of _CallItems that will be read and evaluated by the worker. result_queue: A multiprocessing.Queue of _ResultItems that will written to by the worker. shutdown: A multiprocessing.Event that will be set as a signal to the worker that it should exit when call_queue is empty. T)blockN)r+)r,) getr osgetpidr&r'r( BaseExceptionr __traceback__r)r*) call_queue result_queueZ call_itemrer rrr_process_workers   & rDcCsxxr|jrdSy|jdd}Wntjk r4dSX||}|jjrh|jt||j|j |j ddq||=qqWdS)aMFills call_queue with _WorkItems from pending_work_items. This function never blocks. Args: pending_work_items: A dict mapping work ids to _WorkItems e.g. {5: <_WorkItem...>, 6: <_WorkItem...>, ...} work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids are consumed and the corresponding _WorkItems from pending_work_items are transformed into _CallItems and put in call_queue. 
call_queue: A multiprocessing.Queue that will be filled with _CallItems derived from _WorkItems. NF)r:T) Zfullr;queueZEmptyr%Zset_running_or_notify_cancelr r-r&r'r()pending_work_itemsZwork_idsr@r* work_itemrrr_add_call_item_to_queues   rHc sdfdd}fdd}|j}x~t||ddjD} | sNtt|g| } || krn|j} nr|dk rd_d_dx&|jD]\} } | j j t d ~ qW|j xjD] }|j qW|dSt| tr|stj| }|jsd|dSnJ| dk rd|j| jd} | dk rd| jrT| j j | jn| j j| j~ ||ry|s|dSWntk rYnXdq(WdS) aManages the communication between this process and the worker processes. This function is run in a local thread. Args: executor_reference: A weakref.ref to the ProcessPoolExecutor that owns this thread. Used to determine if the ProcessPoolExecutor has been garbage collected and that this function can exit. process: A list of the multiprocessing.Process instances used as workers. pending_work_items: A dict mapping work ids to _WorkItems e.g. {5: <_WorkItem...>, 6: <_WorkItem...>, ...} work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). call_queue: A multiprocessing.Queue that will be filled with _CallItems derived from _WorkItems for processing by the process workers. result_queue: A multiprocessing.Queue of _ResultItems generated by the process workers. NcstpdkpjS)N)r_shutdown_threadr)executorrr shutting_downsz/_queue_management_worker..shutting_downcsZtddjD}xtd|D]}jdq"WjxjD] }|jqFWdS)Ncss|]}|jVqdS)N)Zis_alive)r7prrr szD_queue_management_worker..shutdown_worker..r)sumvaluesrangeZ put_nowaitcloser )Znb_children_aliveirL)r@ processesrrshutdown_workers z1_queue_management_worker..shutdown_workercSsg|] }|jqSr)sentinel)r7rLrrrr8 sz,_queue_management_worker..Tz^A process in the process pool was terminated abruptly while the future was running or pending.)Z_readerrHrOAssertionErrorrZrecv_brokenrIr r%Z set_exceptionBrokenProcessPoolclearZ terminate isinstanceintpopr r*r+Z set_resultr,r)Zexecutor_referencerSrFZwork_ids_queuer@rArKrTreaderZ sentinelsZreadyZ result_itemr*rGrLr)r@rJrSr_queue_management_workersf         r^c Cshtrtrttdaytjd}Wnttfk r:dSX|dkrHdS|dkrTdSd|attdS)NTSC_SEM_NSEMS_MAXrz@system provides too few semaphores (%d available, 256 necessary))_system_limits_checked_system_limitedNotImplementedErrorr<sysconfAttributeError ValueError)Z nsems_maxrrr_check_system_limitsQsrhccs.x(|D] }|jx|r$|jVqWqWdS)z Specialized implementation of itertools.chain.from_iterable. Each item in *iterable* should be a list. This function is careful not to keep references to yielded objects. N)reverser\)iterableelementrrr_chain_from_iterable_of_listshs rlc@seZdZdZdS)rXzy Raised when a process in a ProcessPoolExecutor terminated abruptly while a future was in the running state. N)rrr__doc__rrrrrXtsrXcsheZdZdddZddZddZdd Zejjj e_ dd d fd d Z dddZ ejj j e _ Z S)ProcessPoolExecutorNcCst|dkrtjpd|_n|dkr.td||_tj|jt|_d|j_ t |_ t j|_ d|_i|_d|_tj|_d|_d|_i|_dS)a/Initializes a new ProcessPoolExecutor instance. Args: max_workers: The maximum number of processes that can be used to execute the given calls. If None or not given then as many worker processes will be created as the machine has processors. 
Nrrz"max_workers must be greater than 0TF)rhr< cpu_count _max_workersrgmultiprocessingZQueueEXTRA_QUEUED_CALLS _call_queueZ _ignore_epiper _result_queuerE _work_ids_queue_management_thread _processesrI threadingZLock_shutdown_lockrW _queue_count_pending_work_items)rZ max_workersrrrr|s$   zProcessPoolExecutor.__init__cCsp|jfdd}|jdkrl|jtjttj|||j|j |j |j |jfd|_d|j_ |jj |jt|j<dS)NcSs|jddS)N)r )_rrrr weakref_cbszFProcessPoolExecutor._start_queue_management_thread..weakref_cb)targetr'T)rtrv_adjust_process_countrxZThreadr^weakrefrefrwr{rursZdaemonstartr )rr}rrr_start_queue_management_threads   z2ProcessPoolExecutor._start_queue_management_threadcCsJxDtt|j|jD].}tjt|j|jfd}|j ||j|j <qWdS)N)r~r') rPlenrwrprqZProcessrDrsrtrpid)rr|rLrrrrs z)ProcessPoolExecutor._adjust_process_countc Os|jt|jrtd|jr$tdtj}t||||}||j|j <|j j |j |j d7_ |j j d|j |SQRXdS)NzKA child process terminated abruptly, the process pool is not usable anymorez*cannot schedule new futures after shutdownr)ryrWrXrI RuntimeErrorrZFuturer$r{rzrur rtr)rr&r'r(fwrrrsubmits  zProcessPoolExecutor.submitr)timeoutr2cs:|dkrtdtjtt|t|d|i|d}t|S)ajReturns an iterator equivalent to map(fn, iter). Args: fn: A callable that will take as many arguments as there are passed iterables. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. chunksize: If greater than one, the iterables will be chopped into chunks of size chunksize and submitted to the process pool. If set to one, the items in the list will be sent one at a time. Returns: An iterator equivalent to: map(func, *iterables) but the calls may be evaluated out-of-order. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. Exception: If fn(*args) raises for any values. rzchunksize must be >= 1.r2)r)rgsupermaprr9r6rl)rr&rr2r3Zresults) __class__rrrs  zProcessPoolExecutor.mapTc CsT|j d|_WdQRX|jr8|jjd|r8|jjd|_d|_d|_d|_dS)NT)ryrIrvrtr r rsrw)rrrrrshutdowns  zProcessPoolExecutor.shutdown)N)T) rrrrrrrrExecutorrmrr __classcell__rr)rrrn{s (   rn)-rm __author__atexitr<Zconcurrent.futuresrrErrqrZmultiprocessing.connectionrrxr functoolsrr0rWeakKeyDictionaryr rrrr Exceptionrrr!objectr$r)r-r6r9rDrHr^rbrcrhrlrrXrrnregisterrrrr,sJ         %s  futures/__pycache__/thread.cpython-36.opt-1.pyc000064400000007512151026775010015316 0ustar003 \@sdZdZddlZddlmZddlZddlZddlZddlZddl Z ej Z da ddZ eje Gdd d eZd d ZGd d d ejZdS)zImplements ThreadPoolExecutor.z"Brian Quinlan (brian@sweetapp.com)N)_baseFcCsJdattj}x|D]\}}|jdqWx|D]\}}|jq2WdS)NT) _shutdownlist_threads_queuesitemsputjoin)rtqr 1/usr/lib64/python3.6/concurrent/futures/thread.py _python_exit!s  r c@seZdZddZddZdS) _WorkItemcCs||_||_||_||_dS)N)futurefnargskwargs)selfrrrrr r r __init__-sz_WorkItem.__init__cCsf|jjsdSy|j|j|j}Wn2tk rT}z|jj|d}WYdd}~XnX|jj|dS)N)rZset_running_or_notify_cancelrrr BaseExceptionZ set_exceptionZ set_result)rresultexcr r r run3s  z _WorkItem.runN)__name__ __module__ __qualname__rrr r r r r,src Cs|yRxL|jdd}|dk r$|j~q|}ts<|dks<|jrJ|jddS~qWWn$tk rvtjjdddYnXdS)NT)blockzException in worker)exc_info)getrrrrrZLOGGERZcritical)Zexecutor_referenceZ work_queueZ work_itemZexecutorr r r _worker@s   rc@sReZdZejjZd ddZddZe j jj e_ ddZ d d d Z e j j j e _ dS)ThreadPoolExecutorNcCsf|dkrtjpdd}|dkr(td||_tj|_t|_d|_ t j |_ |p^d|j |_dS)aInitializes a new ThreadPoolExecutor instance. Args: max_workers: The maximum number of threads that can be used to execute the given calls. thread_name_prefix: An optional name prefix to give our threads. 
Nrz"max_workers must be greater than 0FzThreadPoolExecutor-%d)os cpu_count ValueError _max_workersqueueZQueue _work_queueset_threadsr threadingZLock_shutdown_lock_counter_thread_name_prefix)rZ max_workersZthread_name_prefixr r r r[s  zThreadPoolExecutor.__init__c OsN|j>|jrtdtj}t||||}|jj||j|SQRXdS)Nz*cannot schedule new futures after shutdown) r-r RuntimeErrorrZFuturerr)r_adjust_thread_count)rrrrfwr r r submitrs zThreadPoolExecutor.submitcCsz|jfdd}t|j}||jkrvd|jp,||f}tj|ttj |||jfd}d|_ |j |jj ||jt |<dS)NcSs|jddS)N)r)_r r r r weakref_cbsz;ThreadPoolExecutor._adjust_thread_count..weakref_cbz%s_%d)nametargetrT)r)lenr+r'r/r,ZThreadrweakrefrefZdaemonstartaddr)rr6Z num_threadsZ thread_namer r r r r1s      z'ThreadPoolExecutor._adjust_thread_countTc CsD|jd|_|jjdWdQRX|r@x|jD] }|jq0WdS)NT)r-rr)rr+r)rwaitr r r r shutdowns  zThreadPoolExecutor.shutdown)Nr!)T)rrr itertoolscount__next__r.rr4rExecutor__doc__r1r?r r r r r Vs    r )rD __author__atexitZconcurrent.futuresrr@r(r,r:r$WeakKeyDictionaryrrr registerobjectrrrCr r r r r s  futures/__pycache__/_base.cpython-36.pyc000064400000050232151026775010014156 0ustar003 \R @sPdZddlZddlZddlZddlZdZdZdZdZdZ dZ d Z d Z d Z e e e e e gZe d e d e de de diZejdZGdddeZGdddeZGdddeZGdddeZGdddeZGdddeZGdddeZGdd d eZd!d"Zd#d$Zd/d%d&Zejd'd(Z defd)d*Z!Gd+d,d,eZ"Gd-d.d.eZ#dS)0z"Brian Quinlan (brian@sweetapp.com)NFIRST_COMPLETEDFIRST_EXCEPTION ALL_COMPLETED _AS_COMPLETEDPENDINGRUNNING CANCELLEDCANCELLED_AND_NOTIFIEDFINISHEDpendingrunning cancelledfinishedzconcurrent.futuresc@seZdZdZdS)Errorz-Base class for all future-related exceptions.N)__name__ __module__ __qualname____doc__rr0/usr/lib64/python3.6/concurrent/futures/_base.pyr,src@seZdZdZdS)CancelledErrorzThe Future was cancelled.N)rrrrrrrrr0src@seZdZdZdS) TimeoutErrorz*The operation exceeded the given deadline.N)rrrrrrrrr4src@s0eZdZdZddZddZddZdd Zd S) _Waiterz;Provides the event that wait() and as_completed() block on.cCstj|_g|_dS)N) threadingZEventeventfinished_futures)selfrrr__init__:s z_Waiter.__init__cCs|jj|dS)N)rappend)rfuturerrr add_result>sz_Waiter.add_resultcCs|jj|dS)N)rr)rrrrr add_exceptionAsz_Waiter.add_exceptioncCs|jj|dS)N)rr)rrrrr add_cancelledDsz_Waiter.add_cancelledN)rrrrrr r!r"rrrrr8s rcsDeZdZdZfddZfddZfddZfdd ZZS) _AsCompletedWaiterzUsed by as_completed().cstt|jtj|_dS)N)superr#rrLocklock)r) __class__rrrJsz_AsCompletedWaiter.__init__c s0|j tt|j||jjWdQRXdS)N)r&r$r#r rset)rr)r'rrr Nsz_AsCompletedWaiter.add_resultc s0|j tt|j||jjWdQRXdS)N)r&r$r#r!rr()rr)r'rrr!Ssz _AsCompletedWaiter.add_exceptionc s0|j tt|j||jjWdQRXdS)N)r&r$r#r"rr()rr)r'rrr"Xsz _AsCompletedWaiter.add_cancelled) rrrrrr r!r" __classcell__rr)r'rr#Gs    r#cs8eZdZdZfddZfddZfddZZS)_FirstCompletedWaiterz*Used by wait(return_when=FIRST_COMPLETED).cstj||jjdS)N)r$r rr()rr)r'rrr `s z _FirstCompletedWaiter.add_resultcstj||jjdS)N)r$r!rr()rr)r'rrr!ds z#_FirstCompletedWaiter.add_exceptioncstj||jjdS)N)r$r"rr()rr)r'rrr"hs z#_FirstCompletedWaiter.add_cancelled)rrrrr r!r"r)rr)r'rr*]s  r*csLeZdZdZfddZddZfddZfdd Zfd d ZZ S) _AllCompletedWaiterzsz._create_and_install_waiters..T)r-FzInvalid return condition: %r) rr#rr*sumrr+r ValueError_waitersr)fs return_whenwaiterZ pending_countr=rrr_create_and_install_waiterss  rEc csXxR|rR|d}x|D]}|j|qW|j|jj|WdQRX~|jVqWdS)a~ Iterate on the list *fs*, yielding finished futures one by one in reverse order. Before yielding a future, *waiter* is removed from its waiters and the future is removed from each set in the collection of sets *ref_collect*. 
The aim of this function is to avoid keeping stale references after the future is yielded and before the iterator resumes. r.N)remover5rApop)rBrD ref_collectr=Z futures_setrrr_yield_finished_futuress  rJc csJ|dk r|tj}t|}t|}t|*tdd|D}||}t|t}WdQRXt|}zt|||fdEdHx|r|dkrd}n(|tj}|dkrt dt||f|j j ||j |j }g|_ |j jWdQRX|jt||||fdEdHq~WWdx,|D]$}|j|jj|WdQRXqWXdS)anAn iterator over the given futures that yields each as it completes. Args: fs: The sequence of Futures (possibly created by different Executors) to iterate over. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. Returns: An iterator that yields the given Futures as they complete (finished or cancelled). If any given Futures are duplicated, they will be returned once. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. Ncss |]}|jttgkr|VqdS)N)r;r r )r<r=rrrr>szas_completed..)rIrz%d (of %d) futures unfinished)time monotonicr(lenr0rErlistrJrrwaitr&rclearreverser5rArG) rBtimeoutend_timeZ total_futuresrr rDZ wait_timeoutr=rrr as_completeds@     rTDoneAndNotDoneFuturesz done not_donecCst|tdd|D}t||}|tkr>|r>t||S|tkrf|rftdd|Drft||St|t|krt||St||}WdQRX|jj |x*|D]"}|j |j j |WdQRXqW|j |jt|t||S)aWait for the futures in the given sequence to complete. Args: fs: The sequence of Futures (possibly created by different Executors) to wait upon. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. return_when: Indicates when this function should return. The options are: FIRST_COMPLETED - Return when any future finishes or is cancelled. FIRST_EXCEPTION - Return when any future finishes by raising an exception. If no future raises an exception then it is equivalent to ALL_COMPLETED. ALL_COMPLETED - Return when all futures finish or are cancelled. Returns: A named 2-tuple of sets. The first set, named 'done', contains the futures that completed (is finished or cancelled) before the wait completed. The second set, named 'not_done', contains uncompleted futures. css |]}|jttgkr|VqdS)N)r;r r )r<r=rrrr>szwait..css(|] }|j r|jdk r|VqdS)N)r exception)r<r=rrrr>$sN)r0r(rrUranyrMrErrOr5rArGupdater)rBrRrCdoneZnot_donerDr=rrrrOs"          rOc@seZdZdZddZddZddZdd Zd d Zd d Z ddZ ddZ ddZ dddZ d ddZddZddZddZdS)!Futurez5Represents the result of an asynchronous computation.cCs,tj|_t|_d|_d|_g|_g|_dS)z8Initializes the future. Should not be called by clients.N) rZ Conditionr5rr;_result _exceptionrA_done_callbacks)rrrrr8s  zFuture.__init__c CsBx<|jD]2}y ||Wqtk r8tjd|YqXqWdS)Nz!exception calling callback for %r)r] ExceptionLOGGERrV)rcallbackrrr_invoke_callbacksAs   zFuture._invoke_callbacksc Cs|jv|jtkr`|jrz <%s at %#x state=%s returned %s>z<%s at %#x state=%s>) r5r;r r\r'rr3_STATE_TO_DESCRIPTION_MAPr[)rrrr__repr__Hs" zFuture.__repr__c CsR|j:|jttgkrdS|jttgkr,dSt|_|jjWdQRX|jdS)zCancel the future if possible. Returns True if the future was cancelled, False otherwise. A future cannot be cancelled if it is running or has already completed. 
FTN)r5r;rr rr notify_allra)rrrrcancel\sz Future.cancelc Cs |j|jttgkSQRXdS)z(Return True if the future was cancelled.N)r5r;rr )rrrrr oszFuture.cancelledc Cs|j |jtkSQRXdS)z1Return True if the future is currently executing.N)r5r;r)rrrrr tszFuture.runningc Cs"|j|jtttgkSQRXdS)z>Return True of the future was cancelled or finished executing.N)r5r;rr r )rrrrrYysz Future.donecCs|jr|jn|jSdS)N)r\r[)rrrrZ __get_result~szFuture.__get_resultc Cs>|j&|jtttgkr(|jj|dSWdQRX||dS)a%Attaches a callable that will be called when the future finishes. Args: fn: A callable that will be called with this future as its only argument when the future completes or is cancelled. The callable will always be called by a thread in the same process in which it was added. If the future has already completed or been cancelled then the callable will be called immediately. These callables are called in the order that they were added. N)r5r;rr r r]r)rfnrrradd_done_callbacks  zFuture.add_done_callbackNc Csx|jh|jttgkrtn|jtkr0|jS|jj||jttgkrRtn|jtkrd|jStWdQRXdS)aBReturn the result of the call that the future represents. Args: timeout: The number of seconds to wait for the result if the future isn't done. If None, then there is no limit on the wait time. Returns: The result of the call that the future represents. Raises: CancelledError: If the future was cancelled. TimeoutError: If the future didn't finish executing before the given timeout. Exception: If the call raised then that exception will be raised. N) r5r;rr rr _Future__get_resultrOr)rrRrrrresults   z Future.resultc Cst|jd|jttgkrtn|jtkr.|jS|jj||jttgkrPtn|jtkr`|jStWdQRXdS)aUReturn the exception raised by the call that the future represents. Args: timeout: The number of seconds to wait for the exception if the future isn't done. If None, then there is no limit on the wait time. Returns: The exception raised by the call that the future represents or None if the call completed without raising. Raises: CancelledError: If the future was cancelled. TimeoutError: If the future didn't finish executing before the given timeout. N) r5r;rr rr r\rOr)rrRrrrrVs   zFuture.exceptionc Cst|jd|jtkr6t|_x|jD]}|j|q WdS|jtkrJt|_dStj dt ||jt dWdQRXdS)aMark the future as running or process any cancel notifications. Should only be used by Executor implementations and unit tests. If the future has been cancelled (cancel() was called and returned True) then any threads waiting on the future completing (though calls to as_completed() or wait()) are notified and False is returned. If the future was not cancelled then it is put in the running state (future calls to running() will return True) and True is returned. This method should be called by Executor implementations before executing the work associated with this future. If this method returns False then the work should not be executed. Returns: False if the Future was cancelled, True otherwise. Raises: RuntimeError: if this method was already called or if set_result() or set_exception() was called. FTz!Future %s in unexpected state: %szFuture in unexpected stateN) r5r;rr rAr"rrr_Zcriticalr3 RuntimeError)rrDrrrset_running_or_notify_cancels   z#Future.set_running_or_notify_cancelc CsN|j6||_t|_x|jD]}|j|qW|jjWdQRX|jdS)zSets the return value of work associated with the future. Should only be used by Executor implementations and unit tests. N)r5r[r r;rAr rdra)rrirDrrr set_results zFuture.set_resultc CsN|j6||_t|_x|jD]}|j|qW|jjWdQRX|jdS)zSets the result of the future as being the given exception. Should only be used by Executor implementations and unit tests. 
futures/thread.py
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ThreadPoolExecutor."""

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

import atexit
from concurrent.futures import _base
import itertools
import queue
import threading
import weakref
import os

# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary() _shutdown = False def _python_exit(): global _shutdown _shutdown = True items = list(_threads_queues.items()) for t, q in items: q.put(None) for t, q in items: t.join() atexit.register(_python_exit) class _WorkItem(object): def __init__(self, future, fn, args, kwargs): self.future = future self.fn = fn self.args = args self.kwargs = kwargs def run(self): if not self.future.set_running_or_notify_cancel(): return try: result = self.fn(*self.args, **self.kwargs) except BaseException as exc: self.future.set_exception(exc) # Break a reference cycle with the exception 'exc' self = None else: self.future.set_result(result) def _worker(executor_reference, work_queue): try: while True: work_item = work_queue.get(block=True) if work_item is not None: work_item.run() # Delete references to object. See issue16284 del work_item continue executor = executor_reference() # Exit if: # - The interpreter is shutting down OR # - The executor that owns the worker has been collected OR # - The executor that owns the worker has been shutdown. if _shutdown or executor is None or executor._shutdown: # Notice other workers work_queue.put(None) return del executor except BaseException: _base.LOGGER.critical('Exception in worker', exc_info=True) class ThreadPoolExecutor(_base.Executor): # Used to assign unique thread names when thread_name_prefix is not supplied. _counter = itertools.count().__next__ def __init__(self, max_workers=None, thread_name_prefix=''): """Initializes a new ThreadPoolExecutor instance. Args: max_workers: The maximum number of threads that can be used to execute the given calls. thread_name_prefix: An optional name prefix to give our threads. """ if max_workers is None: # Use this number because ThreadPoolExecutor is often # used to overlap I/O instead of CPU work. max_workers = (os.cpu_count() or 1) * 5 if max_workers <= 0: raise ValueError("max_workers must be greater than 0") self._max_workers = max_workers self._work_queue = queue.Queue() self._threads = set() self._shutdown = False self._shutdown_lock = threading.Lock() self._thread_name_prefix = (thread_name_prefix or ("ThreadPoolExecutor-%d" % self._counter())) def submit(self, fn, *args, **kwargs): with self._shutdown_lock: if self._shutdown: raise RuntimeError('cannot schedule new futures after shutdown') f = _base.Future() w = _WorkItem(f, fn, args, kwargs) self._work_queue.put(w) self._adjust_thread_count() return f submit.__doc__ = _base.Executor.submit.__doc__ def _adjust_thread_count(self): # When the executor gets lost, the weakref callback will wake up # the worker threads. def weakref_cb(_, q=self._work_queue): q.put(None) # TODO(bquinlan): Should avoid creating new threads if there are more # idle threads than items in the work queue. num_threads = len(self._threads) if num_threads < self._max_workers: thread_name = '%s_%d' % (self._thread_name_prefix or self, num_threads) t = threading.Thread(name=thread_name, target=_worker, args=(weakref.ref(self, weakref_cb), self._work_queue)) t.daemon = True t.start() self._threads.add(t) _threads_queues[t] = self._work_queue def shutdown(self, wait=True): with self._shutdown_lock: self._shutdown = True self._work_queue.put(None) if wait: for t in self._threads: t.join() shutdown.__doc__ = _base.Executor.shutdown.__doc__ futures/__init__.py000064400000001440151026775010010355 0ustar00# Copyright 2009 Brian Quinlan. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. 
"""Execute computations asynchronously using threads or processes.""" __author__ = 'Brian Quinlan (brian@sweetapp.com)' from concurrent.futures._base import (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED, CancelledError, TimeoutError, Future, Executor, wait, as_completed) from concurrent.futures.process import ProcessPoolExecutor from concurrent.futures.thread import ThreadPoolExecutor __init__.py000064400000000046151026775010006661 0ustar00# This directory is a Python package.