usr/lib64/python3.6/curses/__init__.py

"""curses

The main package for curses support for Python.  Normally used by importing
the package, and perhaps a particular module inside it.

   import curses
   from curses import textpad
   curses.initscr()
   ...

"""

from _curses import *
import os as _os
import sys as _sys

# Some constants, most notably the ACS_* ones, are only added to the C
# _curses module's dictionary after initscr() is called.  (Some
# versions of SGI's curses don't define values for those constants
# until initscr() has been called.)  This wrapper function calls the
# underlying C initscr(), and then copies the constants from the
# _curses module to the curses package's dictionary.  Don't do 'from
# curses import *' if you'll be needing the ACS_* constants.

def initscr():
    import _curses, curses
    # we call setupterm() here because it raises an error
    # instead of calling exit() in error cases.
    setupterm(term=_os.environ.get("TERM", "unknown"),
              fd=_sys.__stdout__.fileno())
    stdscr = _curses.initscr()
    for key, value in _curses.__dict__.items():
        if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
            setattr(curses, key, value)

    return stdscr

# This is a similar wrapper for start_color(), which adds the COLORS and
# COLOR_PAIRS variables which are only available after start_color() is
# called.

def start_color():
    import _curses, curses
    retval = _curses.start_color()
    if hasattr(_curses, 'COLORS'):
        curses.COLORS = _curses.COLORS
    if hasattr(_curses, 'COLOR_PAIRS'):
        curses.COLOR_PAIRS = _curses.COLOR_PAIRS
    return retval

# Import Python has_key() implementation if _curses doesn't contain has_key()

try:
    has_key
except NameError:
    from .has_key import has_key

# Wrapper for the entire curses-based application.  Runs a function which
# should be the rest of your curses-based application.  If the application
# raises an exception, wrapper() will restore the terminal to a sane state so
# you can read the resulting traceback.

def wrapper(func, *args, **kwds):
    """Wrapper function that initializes curses and calls another function,
    restoring normal keyboard/screen behavior on error.
    The callable object 'func' is then passed the main window 'stdscr'
    as its first argument, followed by any other arguments passed to
    wrapper().
    """

    try:
        # Initialize curses
        stdscr = initscr()

        # Turn off echoing of keys, and enter cbreak mode,
        # where no buffering is performed on keyboard input
        noecho()
        cbreak()

        # In keypad mode, escape sequences for special keys
        # (like the cursor keys) will be interpreted and
        # a special value like curses.KEY_LEFT will be returned
        stdscr.keypad(1)

        # Start color, too.  Harmless if the terminal doesn't have
        # color; user can test with has_colors() later on.  The try/except
        # works around a minor bit of over-conscientiousness in the curses
        # module -- the error return from C start_color() is ignorable.
        try:
            start_color()
        except:
            pass

        return func(stdscr, *args, **kwds)
    finally:
        # Set everything back to normal
        if 'stdscr' in locals():
            stdscr.keypad(0)
            echo()
            nocbreak()
            endwin()

usr/lib64/python3.11/unittest/__init__.py

"""
Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
Smalltalk testing framework (used with permission).
This module contains the core framework classes that form the basis of
specific test cases and suites (TestCase, TestSuite etc.), and also a
text-based utility class for running the tests and reporting the results
(TextTestRunner).

Simple usage:

    import unittest

    class IntegerArithmeticTestCase(unittest.TestCase):
        def testAdd(self):  # test method names begin with 'test'
            self.assertEqual((1 + 2), 3)
            self.assertEqual(0 + 1, 1)
        def testMultiply(self):
            self.assertEqual((0 * 10), 0)
            self.assertEqual((5 * 8), 40)

    if __name__ == '__main__':
        unittest.main()

Further information is available in the bundled documentation, and from

  http://docs.python.org/library/unittest.html

Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.

IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.

THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE.  THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE, SUPPORT,
UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""

__all__ = ['TestResult', 'TestCase', 'IsolatedAsyncioTestCase', 'TestSuite',
           'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
           'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
           'expectedFailure', 'TextTestResult', 'installHandler',
           'registerResult', 'removeResult', 'removeHandler',
           'addModuleCleanup', 'doModuleCleanups', 'enterModuleContext']

# Expose obsolete functions for backwards compatibility
# bpo-5846: Deprecated in Python 3.11, scheduled for removal in Python 3.13.
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])

__unittest = True

from .result import TestResult
from .case import (addModuleCleanup, TestCase, FunctionTestCase, SkipTest,
                   skip, skipIf, skipUnless, expectedFailure,
                   doModuleCleanups, enterModuleContext)
from .suite import BaseTestSuite, TestSuite
from .loader import TestLoader, defaultTestLoader
from .main import TestProgram, main
from .runner import TextTestRunner, TextTestResult
from .signals import installHandler, registerResult, removeResult, removeHandler

# IsolatedAsyncioTestCase will be imported lazily.

from .loader import makeSuite, getTestCaseNames, findTestCases  # deprecated

_TextTestResult = TextTestResult

# There are no tests here, so don't try to run anything discovered from
# introspecting the symbols (e.g. FunctionTestCase). Instead, all our
# tests come from within unittest.test.
def load_tests(loader, tests, pattern):
    import os.path
    # top level directory cached on loader instance
    this_dir = os.path.dirname(__file__)
    return loader.discover(start_dir=this_dir, pattern=pattern)

# Lazy import of IsolatedAsyncioTestCase from .async_case
# It imports asyncio, which is relatively heavy, but most tests
# do not need it.
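# A quick illustration of the effect (a sketch added for exposition, not part
# of the original file): the pair of PEP 562 module-level hooks below means
#
#     import unittest
#     'IsolatedAsyncioTestCase' in dir(unittest)  # True, via __dir__ below
#     unittest.IsolatedAsyncioTestCase            # first access runs the import
#
# so asyncio is only pulled in when the attribute is actually touched.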
def __dir__():
    return globals().keys() | {'IsolatedAsyncioTestCase'}

def __getattr__(name):
    if name == 'IsolatedAsyncioTestCase':
        global IsolatedAsyncioTestCase
        from .async_case import IsolatedAsyncioTestCase
        return IsolatedAsyncioTestCase
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

usr/lib64/python2.7/importlib/__init__.py

"""Backport of importlib.import_module from 3.x."""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
import sys

def _resolve_name(name, package, level):
    """Return the absolute name of the module to be imported."""
    if not hasattr(package, 'rindex'):
        raise ValueError("'package' not set to a string")
    dot = len(package)
    for x in xrange(level, 1, -1):
        try:
            dot = package.rindex('.', 0, dot)
        except ValueError:
            raise ValueError("attempted relative import beyond top-level "
                             "package")
    return "%s.%s" % (package[:dot], name)

def import_module(name, package=None):
    """Import a module.

    The 'package' argument is required when performing a relative import. It
    specifies the package to use as the anchor point from which to resolve the
    relative import to an absolute import.

    """
    if name.startswith('.'):
        if not package:
            raise TypeError("relative imports require the 'package' argument")
        level = 0
        for character in name:
            if character != '.':
                break
            level += 1
        name = _resolve_name(name[level:], package, level)
    __import__(name)
    return sys.modules[name]

usr/lib64/python3.11/xmlrpc/__init__.py

# This directory is a Python package.

usr/lib64/python3.11/collections/__init__.py

'''This module implements specialized container datatypes providing
alternatives to Python's general purpose built-in containers, dict,
list, set, and tuple.
* namedtuple factory function for creating tuple subclasses with named fields * deque list-like container with fast appends and pops on either end * ChainMap dict-like class for creating a single view of multiple mappings * Counter dict subclass for counting hashable objects * OrderedDict dict subclass that remembers the order entries were added * defaultdict dict subclass that calls a factory function to supply missing values * UserDict wrapper around dictionary objects for easier dict subclassing * UserList wrapper around list objects for easier list subclassing * UserString wrapper around string objects for easier string subclassing ''' __all__ = [ 'ChainMap', 'Counter', 'OrderedDict', 'UserDict', 'UserList', 'UserString', 'defaultdict', 'deque', 'namedtuple', ] import _collections_abc import sys as _sys from itertools import chain as _chain from itertools import repeat as _repeat from itertools import starmap as _starmap from keyword import iskeyword as _iskeyword from operator import eq as _eq from operator import itemgetter as _itemgetter from reprlib import recursive_repr as _recursive_repr from _weakref import proxy as _proxy try: from _collections import deque except ImportError: pass else: _collections_abc.MutableSequence.register(deque) try: from _collections import defaultdict except ImportError: pass ################################################################################ ### OrderedDict ################################################################################ class _OrderedDictKeysView(_collections_abc.KeysView): def __reversed__(self): yield from reversed(self._mapping) class _OrderedDictItemsView(_collections_abc.ItemsView): def __reversed__(self): for key in reversed(self._mapping): yield (key, self._mapping[key]) class _OrderedDictValuesView(_collections_abc.ValuesView): def __reversed__(self): for key in reversed(self._mapping): yield self._mapping[key] class _Link(object): __slots__ = 'prev', 'next', 'key', '__weakref__' class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as regular dictionaries. # The internal self.__map dict maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # The sentinel is in self.__hardroot with a weakref proxy in self.__root. # The prev links are weakref proxies (to prevent circular references). # Individual links are kept alive by the hard reference in self.__map. # Those hard references disappear when a key is deleted from an OrderedDict. def __new__(cls, /, *args, **kwds): "Create the ordered dict object and set up the underlying structures." self = dict.__new__(cls) self.__hardroot = _Link() self.__root = root = _proxy(self.__hardroot) root.prev = root.next = root self.__map = {} return self def __init__(self, other=(), /, **kwds): '''Initialize an ordered dictionary. The signature is the same as regular dictionaries. Keyword argument order is preserved. ''' self.__update(other, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link at the end of the linked list, # and the inherited dictionary is updated with the new key/value pair. 
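        # (Added illustration, not from the original file.) Concretely,
        # appending a new key k splices one link in just before the sentinel:
        #     root <-> ... <-> last <-> root
        # becomes
        #     root <-> ... <-> last <-> k <-> root
        # which is an O(1) operation regardless of dictionary size.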
if key not in self: self.__map[key] = link = Link() root = self.__root last = root.prev link.prev, link.next, link.key = last, root, key last.next = link root.prev = proxy(link) dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which gets # removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link = self.__map.pop(key) link_prev = link.prev link_next = link.next link_prev.next = link_next link_next.prev = link_prev link.prev = None link.next = None def __iter__(self): 'od.__iter__() <==> iter(od)' # Traverse the linked list in order. root = self.__root curr = root.next while curr is not root: yield curr.key curr = curr.next def __reversed__(self): 'od.__reversed__() <==> reversed(od)' # Traverse the linked list in reverse order. root = self.__root curr = root.prev while curr is not root: yield curr.key curr = curr.prev def clear(self): 'od.clear() -> None. Remove all items from od.' root = self.__root root.prev = root.next = root self.__map.clear() dict.clear(self) def popitem(self, last=True): '''Remove and return a (key, value) pair from the dictionary. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root.prev link_prev = link.prev link_prev.next = root root.prev = link_prev else: link = root.next link_next = link.next root.next = link_next link_next.prev = root key = link.key del self.__map[key] value = dict.pop(self, key) return key, value def move_to_end(self, key, last=True): '''Move an existing element to the end (or beginning if last is false). Raise KeyError if the element does not exist. ''' link = self.__map[key] link_prev = link.prev link_next = link.next soft_link = link_next.prev link_prev.next = link_next link_next.prev = link_prev root = self.__root if last: last = root.prev link.prev = last link.next = root root.prev = soft_link last.next = link else: first = root.next link.prev = root link.next = first first.prev = soft_link root.next = link def __sizeof__(self): sizeof = _sys.getsizeof n = len(self) + 1 # number of links including root size = sizeof(self.__dict__) # instance dictionary size += sizeof(self.__map) * 2 # internal dict and inherited dict size += sizeof(self.__hardroot) * n # link objects size += sizeof(self.__root) * n # proxy objects return size update = __update = _collections_abc.MutableMapping.update def keys(self): "D.keys() -> a set-like object providing a view on D's keys" return _OrderedDictKeysView(self) def items(self): "D.items() -> a set-like object providing a view on D's items" return _OrderedDictItemsView(self) def values(self): "D.values() -> an object providing a view on D's values" return _OrderedDictValuesView(self) __ne__ = _collections_abc.MutableMapping.__ne__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' marker = self.__marker result = dict.pop(self, key, marker) if result is not marker: # The same as in __delitem__(). 
link = self.__map.pop(key) link_prev = link.prev link_next = link.next link_prev.next = link_next link_next.prev = link_prev link.prev = None link.next = None return result if default is marker: raise KeyError(key) return default def setdefault(self, key, default=None): '''Insert key with a value of default if key is not in the dictionary. Return the value for key if key is in the dictionary, else default. ''' if key in self: return self[key] self[key] = default return default @_recursive_repr() def __repr__(self): 'od.__repr__() <==> repr(od)' if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self.items())) def __reduce__(self): 'Return state information for pickling' state = self.__getstate__() if state: if isinstance(state, tuple): state, slots = state else: slots = {} state = state.copy() slots = slots.copy() for k in vars(OrderedDict()): state.pop(k, None) slots.pop(k, None) if slots: state = state, slots else: state = state or None return self.__class__, (), state, None, iter(self.items()) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''Create a new ordered dictionary with keys from iterable and values set to value. ''' self = cls() for key in iterable: self[key] = value return self def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return dict.__eq__(self, other) and all(map(_eq, self, other)) return dict.__eq__(self, other) def __ior__(self, other): self.update(other) return self def __or__(self, other): if not isinstance(other, dict): return NotImplemented new = self.__class__(self) new.update(other) return new def __ror__(self, other): if not isinstance(other, dict): return NotImplemented new = self.__class__(other) new.update(self) return new try: from _collections import OrderedDict except ImportError: # Leave the pure Python version in place. pass ################################################################################ ### namedtuple ################################################################################ try: from _collections import _tuplegetter except ImportError: _tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc) def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): """Returns a new subclass of tuple with named fields. >>> Point = namedtuple('Point', ['x', 'y']) >>> Point.__doc__ # docstring for the new class 'Point(x, y)' >>> p = Point(11, y=22) # instantiate with positional args or keywords >>> p[0] + p[1] # indexable like a plain tuple 33 >>> x, y = p # unpack like a regular tuple >>> x, y (11, 22) >>> p.x + p.y # fields also accessible by name 33 >>> d = p._asdict() # convert to a dictionary >>> d['x'] 11 >>> Point(**d) # convert from a dictionary Point(x=11, y=22) >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields Point(x=100, y=22) """ # Validate the field names. At the user's option, either generate an error # message or automatically replace the field name with a valid name. 
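    # (Added illustration, not from the original file.) With rename=True,
    # invalid names are replaced positionally; for example
    #     namedtuple('P', ['x', 'class', 'x'], rename=True)._fields
    # evaluates to ('x', '_1', '_2') -- 'class' is a keyword and the second
    # 'x' is a duplicate, so both are renamed to their field index.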
if isinstance(field_names, str): field_names = field_names.replace(',', ' ').split() field_names = list(map(str, field_names)) typename = _sys.intern(str(typename)) if rename: seen = set() for index, name in enumerate(field_names): if (not name.isidentifier() or _iskeyword(name) or name.startswith('_') or name in seen): field_names[index] = f'_{index}' seen.add(name) for name in [typename] + field_names: if type(name) is not str: raise TypeError('Type names and field names must be strings') if not name.isidentifier(): raise ValueError('Type names and field names must be valid ' f'identifiers: {name!r}') if _iskeyword(name): raise ValueError('Type names and field names cannot be a ' f'keyword: {name!r}') seen = set() for name in field_names: if name.startswith('_') and not rename: raise ValueError('Field names cannot start with an underscore: ' f'{name!r}') if name in seen: raise ValueError(f'Encountered duplicate field name: {name!r}') seen.add(name) field_defaults = {} if defaults is not None: defaults = tuple(defaults) if len(defaults) > len(field_names): raise TypeError('Got more default values than field names') field_defaults = dict(reversed(list(zip(reversed(field_names), reversed(defaults))))) # Variables used in the methods and docstrings field_names = tuple(map(_sys.intern, field_names)) num_fields = len(field_names) arg_list = ', '.join(field_names) if num_fields == 1: arg_list += ',' repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' tuple_new = tuple.__new__ _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip # Create all the named tuple methods to be added to the class namespace namespace = { '_tuple_new': tuple_new, '__builtins__': {}, '__name__': f'namedtuple_{typename}', } code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))' __new__ = eval(code, namespace) __new__.__name__ = '__new__' __new__.__doc__ = f'Create new instance of {typename}({arg_list})' if defaults is not None: __new__.__defaults__ = defaults @classmethod def _make(cls, iterable): result = tuple_new(cls, iterable) if _len(result) != num_fields: raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') return result _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' 'or iterable') def _replace(self, /, **kwds): result = self._make(_map(kwds.pop, field_names, self)) if kwds: raise ValueError(f'Got unexpected field names: {list(kwds)!r}') return result _replace.__doc__ = (f'Return a new {typename} object replacing specified ' 'fields with new values') def __repr__(self): 'Return a nicely formatted representation string' return self.__class__.__name__ + repr_fmt % self def _asdict(self): 'Return a new dict which maps field names to their values.' return _dict(_zip(self._fields, self)) def __getnewargs__(self): 'Return self as a plain tuple. Used by copy and pickle.' 
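        # (Added note, not from the original file.) Unpickling calls
        # __new__(cls, *self.__getnewargs__()), so a fields-only namedtuple
        # needs no separate __getstate__/__setstate__ support.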
return _tuple(self) # Modify function metadata to help with introspection and debugging for method in ( __new__, _make.__func__, _replace, __repr__, _asdict, __getnewargs__, ): method.__qualname__ = f'{typename}.{method.__name__}' # Build-up the class namespace dictionary # and use type() to build the result class class_namespace = { '__doc__': f'{typename}({arg_list})', '__slots__': (), '_fields': field_names, '_field_defaults': field_defaults, '__new__': __new__, '_make': _make, '_replace': _replace, '__repr__': __repr__, '_asdict': _asdict, '__getnewargs__': __getnewargs__, '__match_args__': field_names, } for index, name in enumerate(field_names): doc = _sys.intern(f'Alias for field number {index}') class_namespace[name] = _tuplegetter(index, doc) result = type(typename, (tuple,), class_namespace) # For pickling to work, the __module__ variable needs to be set to the frame # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython), or where the user has # specified a particular module. if module is None: try: module = _sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass if module is not None: result.__module__ = module return result ######################################################################## ### Counter ######################################################################## def _count_elements(mapping, iterable): 'Tally elements from the iterable.' mapping_get = mapping.get for elem in iterable: mapping[elem] = mapping_get(elem, 0) + 1 try: # Load C helper function if available from _collections import _count_elements except ImportError: pass class Counter(dict): '''Dict subclass for counting hashable items. Sometimes called a bag or multiset. Elements are stored as dictionary keys and their counts are stored as dictionary values. >>> c = Counter('abcdeabcdabcaba') # count elements from a string >>> c.most_common(3) # three most common elements [('a', 5), ('b', 4), ('c', 3)] >>> sorted(c) # list all unique elements ['a', 'b', 'c', 'd', 'e'] >>> ''.join(sorted(c.elements())) # list elements with repetitions 'aaaaabbbbcccdde' >>> sum(c.values()) # total of all counts 15 >>> c['a'] # count of letter 'a' 5 >>> for elem in 'shazam': # update counts from an iterable ... c[elem] += 1 # by adding 1 to each element's count >>> c['a'] # now there are seven 'a' 7 >>> del c['b'] # remove all 'b' >>> c['b'] # now there are zero 'b' 0 >>> d = Counter('simsalabim') # make another counter >>> c.update(d) # add in the second counter >>> c['a'] # now there are nine 'a' 9 >>> c.clear() # empty the counter >>> c Counter() Note: If a count is set to zero or reduced to zero, it will remain in the counter until the entry is deleted or the counter is cleared: >>> c = Counter('aaabbc') >>> c['b'] -= 2 # reduce the count of 'b' by two >>> c.most_common() # 'b' is still in, but its count is zero [('a', 3), ('c', 1), ('b', 0)] ''' # References: # http://en.wikipedia.org/wiki/Multiset # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm # http://code.activestate.com/recipes/259174/ # Knuth, TAOCP Vol. II section 4.6.3 def __init__(self, iterable=None, /, **kwds): '''Create a new, empty Counter object. And if given, count elements from an input iterable. 
Or, initialize the count from another mapping of elements to their counts. >>> c = Counter() # a new, empty counter >>> c = Counter('gallahad') # a new counter from an iterable >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping >>> c = Counter(a=4, b=2) # a new counter from keyword args ''' super().__init__() self.update(iterable, **kwds) def __missing__(self, key): 'The count of elements not in the Counter is zero.' # Needed so that self[missing_item] does not raise KeyError return 0 def total(self): 'Sum of the counts' return sum(self.values()) def most_common(self, n=None): '''List the n most common elements and their counts from the most common to the least. If n is None, then list all element counts. >>> Counter('abracadabra').most_common(3) [('a', 5), ('b', 2), ('r', 2)] ''' # Emulate Bag.sortedByCount from Smalltalk if n is None: return sorted(self.items(), key=_itemgetter(1), reverse=True) # Lazy import to speedup Python startup time import heapq return heapq.nlargest(n, self.items(), key=_itemgetter(1)) def elements(self): '''Iterator over elements repeating each as many times as its count. >>> c = Counter('ABCABC') >>> sorted(c.elements()) ['A', 'A', 'B', 'B', 'C', 'C'] # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 >>> import math >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) >>> math.prod(prime_factors.elements()) 1836 Note, if an element's count has been set to zero or is a negative number, elements() will ignore it. ''' # Emulate Bag.do from Smalltalk and Multiset.begin from C++. return _chain.from_iterable(_starmap(_repeat, self.items())) # Override dict methods where necessary @classmethod def fromkeys(cls, iterable, v=None): # There is no equivalent method for counters because the semantics # would be ambiguous in cases such as Counter.fromkeys('aaabbc', v=2). # Initializing counters to zero values isn't necessary because zero # is already the default value for counter lookups. Initializing # to one is easily accomplished with Counter(set(iterable)). For # more exotic cases, create a dictionary first using a dictionary # comprehension or dict.fromkeys(). raise NotImplementedError( 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') def update(self, iterable=None, /, **kwds): '''Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4 ''' # The regular dict.update() operation makes no sense here because the # replace behavior results in some of the original untouched counts # being mixed-in with all of the other counts for a mismash that # doesn't have a straight-forward interpretation in most counting # contexts. Instead, we implement straight-addition. Both the inputs # and outputs are allowed to contain zero and negative counts. if iterable is not None: if isinstance(iterable, _collections_abc.Mapping): if self: self_get = self.get for elem, count in iterable.items(): self[elem] = count + self_get(elem, 0) else: # fast path when counter is empty super().update(iterable) else: _count_elements(self, iterable) if kwds: self.update(kwds) def subtract(self, iterable=None, /, **kwds): '''Like dict.update() but subtracts counts instead of replacing them. Counts can be reduced below zero. 
Both the inputs and outputs are allowed to contain zero and negative counts. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.subtract('witch') # subtract elements from another iterable >>> c.subtract(Counter('watch')) # subtract elements from another counter >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch 0 >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch -1 ''' if iterable is not None: self_get = self.get if isinstance(iterable, _collections_abc.Mapping): for elem, count in iterable.items(): self[elem] = self_get(elem, 0) - count else: for elem in iterable: self[elem] = self_get(elem, 0) - 1 if kwds: self.subtract(kwds) def copy(self): 'Return a shallow copy.' return self.__class__(self) def __reduce__(self): return self.__class__, (dict(self),) def __delitem__(self, elem): 'Like dict.__delitem__() but does not raise KeyError for missing values.' if elem in self: super().__delitem__(elem) def __repr__(self): if not self: return f'{self.__class__.__name__}()' try: # dict() preserves the ordering returned by most_common() d = dict(self.most_common()) except TypeError: # handle case where values are not orderable d = dict(self) return f'{self.__class__.__name__}({d!r})' # Multiset-style mathematical operations discussed in: # Knuth TAOCP Volume II section 4.6.3 exercise 19 # and at http://en.wikipedia.org/wiki/Multiset # # Outputs guaranteed to only include positive counts. # # To strip negative and zero counts, add-in an empty counter: # c += Counter() # # Results are ordered according to when an element is first # encountered in the left operand and then by the order # encountered in the right operand. # # When the multiplicities are all zero or one, multiset operations # are guaranteed to be equivalent to the corresponding operations # for regular sets. # Given counter multisets such as: # cp = Counter(a=1, b=0, c=1) # cq = Counter(c=1, d=0, e=1) # The corresponding regular sets would be: # sp = {'a', 'c'} # sq = {'c', 'e'} # All of the following relations would hold: # set(cp + cq) == sp | sq # set(cp - cq) == sp - sq # set(cp | cq) == sp | sq # set(cp & cq) == sp & sq # (cp == cq) == (sp == sq) # (cp != cq) == (sp != sq) # (cp <= cq) == (sp <= sq) # (cp < cq) == (sp < sq) # (cp >= cq) == (sp >= sq) # (cp > cq) == (sp > sq) def __eq__(self, other): 'True if all counts agree. Missing counts are treated as zero.' if not isinstance(other, Counter): return NotImplemented return all(self[e] == other[e] for c in (self, other) for e in c) def __ne__(self, other): 'True if any counts disagree. Missing counts are treated as zero.' if not isinstance(other, Counter): return NotImplemented return not self == other def __le__(self, other): 'True if all counts in self are a subset of those in other.' if not isinstance(other, Counter): return NotImplemented return all(self[e] <= other[e] for c in (self, other) for e in c) def __lt__(self, other): 'True if all counts in self are a proper subset of those in other.' if not isinstance(other, Counter): return NotImplemented return self <= other and self != other def __ge__(self, other): 'True if all counts in self are a superset of those in other.' if not isinstance(other, Counter): return NotImplemented return all(self[e] >= other[e] for c in (self, other) for e in c) def __gt__(self, other): 'True if all counts in self are a proper superset of those in other.' 
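        # (Added illustration, not from the original file.) Examples of the
        # multiset ordering these comparisons define:
        #     Counter('aab') > Counter('ab')    # True: proper superset
        #     Counter('aab') > Counter('abc')   # False: left side lacks 'c'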
if not isinstance(other, Counter): return NotImplemented return self >= other and self != other def __add__(self, other): '''Add counts from two counters. >>> Counter('abbb') + Counter('bcc') Counter({'b': 4, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): newcount = count + other[elem] if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count > 0: result[elem] = count return result def __sub__(self, other): ''' Subtract count, but keep only results with positive counts. >>> Counter('abbbc') - Counter('bccd') Counter({'b': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): newcount = count - other[elem] if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count < 0: result[elem] = 0 - count return result def __or__(self, other): '''Union is the maximum of value in either of the input counters. >>> Counter('abbb') | Counter('bcc') Counter({'b': 3, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): other_count = other[elem] newcount = other_count if count < other_count else count if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count > 0: result[elem] = count return result def __and__(self, other): ''' Intersection is the minimum of corresponding counts. >>> Counter('abbb') & Counter('bcc') Counter({'b': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): other_count = other[elem] newcount = count if count < other_count else other_count if newcount > 0: result[elem] = newcount return result def __pos__(self): 'Adds an empty counter, effectively stripping negative and zero counts' result = Counter() for elem, count in self.items(): if count > 0: result[elem] = count return result def __neg__(self): '''Subtracts from an empty counter. Strips positive and zero counts, and flips the sign on negative counts. ''' result = Counter() for elem, count in self.items(): if count < 0: result[elem] = 0 - count return result def _keep_positive(self): '''Internal method to strip elements with a negative or zero count''' nonpositive = [elem for elem, count in self.items() if not count > 0] for elem in nonpositive: del self[elem] return self def __iadd__(self, other): '''Inplace add from another counter, keeping only positive counts. >>> c = Counter('abbb') >>> c += Counter('bcc') >>> c Counter({'b': 4, 'c': 2, 'a': 1}) ''' for elem, count in other.items(): self[elem] += count return self._keep_positive() def __isub__(self, other): '''Inplace subtract counter, but keep only results with positive counts. >>> c = Counter('abbbc') >>> c -= Counter('bccd') >>> c Counter({'b': 2, 'a': 1}) ''' for elem, count in other.items(): self[elem] -= count return self._keep_positive() def __ior__(self, other): '''Inplace union is the maximum of value from either counter. >>> c = Counter('abbb') >>> c |= Counter('bcc') >>> c Counter({'b': 3, 'c': 2, 'a': 1}) ''' for elem, other_count in other.items(): count = self[elem] if other_count > count: self[elem] = other_count return self._keep_positive() def __iand__(self, other): '''Inplace intersection is the minimum of corresponding counts. 
>>> c = Counter('abbb') >>> c &= Counter('bcc') >>> c Counter({'b': 1}) ''' for elem, count in self.items(): other_count = other[elem] if other_count < count: self[elem] = other_count return self._keep_positive() ######################################################################## ### ChainMap ######################################################################## class ChainMap(_collections_abc.MutableMapping): ''' A ChainMap groups multiple dicts (or other mappings) together to create a single, updateable view. The underlying mappings are stored in a list. That list is public and can be accessed or updated using the *maps* attribute. There is no other state. Lookups search the underlying mappings successively until a key is found. In contrast, writes, updates, and deletions only operate on the first mapping. ''' def __init__(self, *maps): '''Initialize a ChainMap by setting *maps* to the given mappings. If no mappings are provided, a single empty dictionary is used. ''' self.maps = list(maps) or [{}] # always at least one map def __missing__(self, key): raise KeyError(key) def __getitem__(self, key): for mapping in self.maps: try: return mapping[key] # can't use 'key in mapping' with defaultdict except KeyError: pass return self.__missing__(key) # support subclasses that define __missing__ def get(self, key, default=None): return self[key] if key in self else default def __len__(self): return len(set().union(*self.maps)) # reuses stored hash values if possible def __iter__(self): d = {} for mapping in reversed(self.maps): d.update(dict.fromkeys(mapping)) # reuses stored hash values if possible return iter(d) def __contains__(self, key): return any(key in m for m in self.maps) def __bool__(self): return any(self.maps) @_recursive_repr() def __repr__(self): return f'{self.__class__.__name__}({", ".join(map(repr, self.maps))})' @classmethod def fromkeys(cls, iterable, *args): 'Create a ChainMap with a single dict created from the iterable.' return cls(dict.fromkeys(iterable, *args)) def copy(self): 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' return self.__class__(self.maps[0].copy(), *self.maps[1:]) __copy__ = copy def new_child(self, m=None, **kwargs): # like Django's Context.push() '''New ChainMap with a new map followed by all previous maps. If no map is provided, an empty dict is used. Keyword arguments update the map or new empty dict. ''' if m is None: m = kwargs elif kwargs: m.update(kwargs) return self.__class__(m, *self.maps) @property def parents(self): # like Django's Context.pop() 'New ChainMap from maps[1:].' return self.__class__(*self.maps[1:]) def __setitem__(self, key, value): self.maps[0][key] = value def __delitem__(self, key): try: del self.maps[0][key] except KeyError: raise KeyError(f'Key not found in the first mapping: {key!r}') def popitem(self): 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' try: return self.maps[0].popitem() except KeyError: raise KeyError('No keys found in the first mapping.') def pop(self, key, *args): 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' try: return self.maps[0].pop(key, *args) except KeyError: raise KeyError(f'Key not found in the first mapping: {key!r}') def clear(self): 'Clear maps[0], leaving maps[1:] intact.' 
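        # (Added illustration, not from the original file.) Like all ChainMap
        # mutations, clear() touches only the first mapping:
        #     cm = ChainMap({'a': 1}, {'a': 2, 'b': 3})
        #     cm.clear()                # empties maps[0] only
        #     cm['a'], cm['b']          # -> (2, 3), still visible via maps[1]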
self.maps[0].clear() def __ior__(self, other): self.maps[0].update(other) return self def __or__(self, other): if not isinstance(other, _collections_abc.Mapping): return NotImplemented m = self.copy() m.maps[0].update(other) return m def __ror__(self, other): if not isinstance(other, _collections_abc.Mapping): return NotImplemented m = dict(other) for child in reversed(self.maps): m.update(child) return self.__class__(m) ################################################################################ ### UserDict ################################################################################ class UserDict(_collections_abc.MutableMapping): # Start by filling-out the abstract methods def __init__(self, dict=None, /, **kwargs): self.data = {} if dict is not None: self.update(dict) if kwargs: self.update(kwargs) def __len__(self): return len(self.data) def __getitem__(self, key): if key in self.data: return self.data[key] if hasattr(self.__class__, "__missing__"): return self.__class__.__missing__(self, key) raise KeyError(key) def __setitem__(self, key, item): self.data[key] = item def __delitem__(self, key): del self.data[key] def __iter__(self): return iter(self.data) # Modify __contains__ to work correctly when __missing__ is present def __contains__(self, key): return key in self.data # Now, add the methods in dicts but not in MutableMapping def __repr__(self): return repr(self.data) def __or__(self, other): if isinstance(other, UserDict): return self.__class__(self.data | other.data) if isinstance(other, dict): return self.__class__(self.data | other) return NotImplemented def __ror__(self, other): if isinstance(other, UserDict): return self.__class__(other.data | self.data) if isinstance(other, dict): return self.__class__(other | self.data) return NotImplemented def __ior__(self, other): if isinstance(other, UserDict): self.data |= other.data else: self.data |= other return self def __copy__(self): inst = self.__class__.__new__(self.__class__) inst.__dict__.update(self.__dict__) # Create a copy and avoid triggering descriptors inst.__dict__["data"] = self.__dict__["data"].copy() return inst def copy(self): if self.__class__ is UserDict: return UserDict(self.data.copy()) import copy data = self.data try: self.data = {} c = copy.copy(self) finally: self.data = data c.update(self) return c @classmethod def fromkeys(cls, iterable, value=None): d = cls() for key in iterable: d[key] = value return d ################################################################################ ### UserList ################################################################################ class UserList(_collections_abc.MutableSequence): """A more or less complete user-defined wrapper around list objects.""" def __init__(self, initlist=None): self.data = [] if initlist is not None: # XXX should this accept an arbitrary sequence? 
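        # (Added note, not from the original file.) Three init paths follow:
        # a plain list is copied via slice assignment, another UserList
        # contributes a copy of its backing list, and any other iterable
        # goes through list().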
if type(initlist) == type(self.data): self.data[:] = initlist elif isinstance(initlist, UserList): self.data[:] = initlist.data[:] else: self.data = list(initlist) def __repr__(self): return repr(self.data) def __lt__(self, other): return self.data < self.__cast(other) def __le__(self, other): return self.data <= self.__cast(other) def __eq__(self, other): return self.data == self.__cast(other) def __gt__(self, other): return self.data > self.__cast(other) def __ge__(self, other): return self.data >= self.__cast(other) def __cast(self, other): return other.data if isinstance(other, UserList) else other def __contains__(self, item): return item in self.data def __len__(self): return len(self.data) def __getitem__(self, i): if isinstance(i, slice): return self.__class__(self.data[i]) else: return self.data[i] def __setitem__(self, i, item): self.data[i] = item def __delitem__(self, i): del self.data[i] def __add__(self, other): if isinstance(other, UserList): return self.__class__(self.data + other.data) elif isinstance(other, type(self.data)): return self.__class__(self.data + other) return self.__class__(self.data + list(other)) def __radd__(self, other): if isinstance(other, UserList): return self.__class__(other.data + self.data) elif isinstance(other, type(self.data)): return self.__class__(other + self.data) return self.__class__(list(other) + self.data) def __iadd__(self, other): if isinstance(other, UserList): self.data += other.data elif isinstance(other, type(self.data)): self.data += other else: self.data += list(other) return self def __mul__(self, n): return self.__class__(self.data * n) __rmul__ = __mul__ def __imul__(self, n): self.data *= n return self def __copy__(self): inst = self.__class__.__new__(self.__class__) inst.__dict__.update(self.__dict__) # Create a copy and avoid triggering descriptors inst.__dict__["data"] = self.__dict__["data"][:] return inst def append(self, item): self.data.append(item) def insert(self, i, item): self.data.insert(i, item) def pop(self, i=-1): return self.data.pop(i) def remove(self, item): self.data.remove(item) def clear(self): self.data.clear() def copy(self): return self.__class__(self) def count(self, item): return self.data.count(item) def index(self, item, *args): return self.data.index(item, *args) def reverse(self): self.data.reverse() def sort(self, /, *args, **kwds): self.data.sort(*args, **kwds) def extend(self, other): if isinstance(other, UserList): self.data.extend(other.data) else: self.data.extend(other) ################################################################################ ### UserString ################################################################################ class UserString(_collections_abc.Sequence): def __init__(self, seq): if isinstance(seq, str): self.data = seq elif isinstance(seq, UserString): self.data = seq.data[:] else: self.data = str(seq) def __str__(self): return str(self.data) def __repr__(self): return repr(self.data) def __int__(self): return int(self.data) def __float__(self): return float(self.data) def __complex__(self): return complex(self.data) def __hash__(self): return hash(self.data) def __getnewargs__(self): return (self.data[:],) def __eq__(self, string): if isinstance(string, UserString): return self.data == string.data return self.data == string def __lt__(self, string): if isinstance(string, UserString): return self.data < string.data return self.data < string def __le__(self, string): if isinstance(string, UserString): return self.data <= string.data return self.data <= 
string def __gt__(self, string): if isinstance(string, UserString): return self.data > string.data return self.data > string def __ge__(self, string): if isinstance(string, UserString): return self.data >= string.data return self.data >= string def __contains__(self, char): if isinstance(char, UserString): char = char.data return char in self.data def __len__(self): return len(self.data) def __getitem__(self, index): return self.__class__(self.data[index]) def __add__(self, other): if isinstance(other, UserString): return self.__class__(self.data + other.data) elif isinstance(other, str): return self.__class__(self.data + other) return self.__class__(self.data + str(other)) def __radd__(self, other): if isinstance(other, str): return self.__class__(other + self.data) return self.__class__(str(other) + self.data) def __mul__(self, n): return self.__class__(self.data * n) __rmul__ = __mul__ def __mod__(self, args): return self.__class__(self.data % args) def __rmod__(self, template): return self.__class__(str(template) % self) # the following methods are defined in alphabetical order: def capitalize(self): return self.__class__(self.data.capitalize()) def casefold(self): return self.__class__(self.data.casefold()) def center(self, width, *args): return self.__class__(self.data.center(width, *args)) def count(self, sub, start=0, end=_sys.maxsize): if isinstance(sub, UserString): sub = sub.data return self.data.count(sub, start, end) def removeprefix(self, prefix, /): if isinstance(prefix, UserString): prefix = prefix.data return self.__class__(self.data.removeprefix(prefix)) def removesuffix(self, suffix, /): if isinstance(suffix, UserString): suffix = suffix.data return self.__class__(self.data.removesuffix(suffix)) def encode(self, encoding='utf-8', errors='strict'): encoding = 'utf-8' if encoding is None else encoding errors = 'strict' if errors is None else errors return self.data.encode(encoding, errors) def endswith(self, suffix, start=0, end=_sys.maxsize): return self.data.endswith(suffix, start, end) def expandtabs(self, tabsize=8): return self.__class__(self.data.expandtabs(tabsize)) def find(self, sub, start=0, end=_sys.maxsize): if isinstance(sub, UserString): sub = sub.data return self.data.find(sub, start, end) def format(self, /, *args, **kwds): return self.data.format(*args, **kwds) def format_map(self, mapping): return self.data.format_map(mapping) def index(self, sub, start=0, end=_sys.maxsize): return self.data.index(sub, start, end) def isalpha(self): return self.data.isalpha() def isalnum(self): return self.data.isalnum() def isascii(self): return self.data.isascii() def isdecimal(self): return self.data.isdecimal() def isdigit(self): return self.data.isdigit() def isidentifier(self): return self.data.isidentifier() def islower(self): return self.data.islower() def isnumeric(self): return self.data.isnumeric() def isprintable(self): return self.data.isprintable() def isspace(self): return self.data.isspace() def istitle(self): return self.data.istitle() def isupper(self): return self.data.isupper() def join(self, seq): return self.data.join(seq) def ljust(self, width, *args): return self.__class__(self.data.ljust(width, *args)) def lower(self): return self.__class__(self.data.lower()) def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars)) maketrans = str.maketrans def partition(self, sep): return self.data.partition(sep) def replace(self, old, new, maxsplit=-1): if isinstance(old, UserString): old = old.data if isinstance(new, UserString): new = 
new.data return self.__class__(self.data.replace(old, new, maxsplit)) def rfind(self, sub, start=0, end=_sys.maxsize): if isinstance(sub, UserString): sub = sub.data return self.data.rfind(sub, start, end) def rindex(self, sub, start=0, end=_sys.maxsize): return self.data.rindex(sub, start, end) def rjust(self, width, *args): return self.__class__(self.data.rjust(width, *args)) def rpartition(self, sep): return self.data.rpartition(sep) def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars)) def split(self, sep=None, maxsplit=-1): return self.data.split(sep, maxsplit) def rsplit(self, sep=None, maxsplit=-1): return self.data.rsplit(sep, maxsplit) def splitlines(self, keepends=False): return self.data.splitlines(keepends) def startswith(self, prefix, start=0, end=_sys.maxsize): return self.data.startswith(prefix, start, end) def strip(self, chars=None): return self.__class__(self.data.strip(chars)) def swapcase(self): return self.__class__(self.data.swapcase()) def title(self): return self.__class__(self.data.title()) def translate(self, *args): return self.__class__(self.data.translate(*args)) def upper(self): return self.__class__(self.data.upper()) def zfill(self, width): return self.__class__(self.data.zfill(width)) usr/lib64/python3.11/importlib/__init__.py000064400000013711151027156320014240 0ustar00"""A pure Python implementation of import.""" __all__ = ['__import__', 'import_module', 'invalidate_caches', 'reload'] # Bootstrap help ##################################################### # Until bootstrapping is complete, DO NOT import any modules that attempt # to import importlib._bootstrap (directly or indirectly). Since this # partially initialised package would be present in sys.modules, those # modules would get an uninitialised copy of the source version, instead # of a fully initialised version (either the frozen one or the one # initialised below if the frozen one is not available). import _imp # Just the builtin component, NOT the full Python module import sys try: import _frozen_importlib as _bootstrap except ImportError: from . import _bootstrap _bootstrap._setup(sys, _imp) else: # importlib._bootstrap is the built-in import, ensure we don't create # a second copy of the module. _bootstrap.__name__ = 'importlib._bootstrap' _bootstrap.__package__ = 'importlib' try: _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py') except NameError: # __file__ is not guaranteed to be defined, e.g. if this code gets # frozen by a tool like cx_Freeze. pass sys.modules['importlib._bootstrap'] = _bootstrap try: import _frozen_importlib_external as _bootstrap_external except ImportError: from . import _bootstrap_external _bootstrap_external._set_bootstrap_module(_bootstrap) _bootstrap._bootstrap_external = _bootstrap_external else: _bootstrap_external.__name__ = 'importlib._bootstrap_external' _bootstrap_external.__package__ = 'importlib' try: _bootstrap_external.__file__ = __file__.replace('__init__.py', '_bootstrap_external.py') except NameError: # __file__ is not guaranteed to be defined, e.g. if this code gets # frozen by a tool like cx_Freeze. 
pass sys.modules['importlib._bootstrap_external'] = _bootstrap_external # To simplify imports in test code _pack_uint32 = _bootstrap_external._pack_uint32 _unpack_uint32 = _bootstrap_external._unpack_uint32 # Fully bootstrapped at this point, import whatever you like, circular # dependencies and startup overhead minimisation permitting :) import warnings # Public API ######################################################### from ._bootstrap import __import__ def invalidate_caches(): """Call the invalidate_caches() method on all meta path finders stored in sys.meta_path (where implemented).""" for finder in sys.meta_path: if hasattr(finder, 'invalidate_caches'): finder.invalidate_caches() def find_loader(name, path=None): """Return the loader for the specified module. This is a backward-compatible wrapper around find_spec(). This function is deprecated in favor of importlib.util.find_spec(). """ warnings.warn('Deprecated since Python 3.4 and slated for removal in ' 'Python 3.12; use importlib.util.find_spec() instead', DeprecationWarning, stacklevel=2) try: loader = sys.modules[name].__loader__ if loader is None: raise ValueError('{}.__loader__ is None'.format(name)) else: return loader except KeyError: pass except AttributeError: raise ValueError('{}.__loader__ is not set'.format(name)) from None spec = _bootstrap._find_spec(name, path) # We won't worry about malformed specs (missing attributes). if spec is None: return None if spec.loader is None: if spec.submodule_search_locations is None: raise ImportError('spec for {} missing loader'.format(name), name=name) raise ImportError('namespace packages do not have loaders', name=name) return spec.loader def import_module(name, package=None): """Import a module. The 'package' argument is required when performing a relative import. It specifies the package to use as the anchor point from which to resolve the relative import to an absolute import. """ level = 0 if name.startswith('.'): if not package: msg = ("the 'package' argument is required to perform a relative " "import for {!r}") raise TypeError(msg.format(name)) for character in name: if character != '.': break level += 1 return _bootstrap._gcd_import(name[level:], package, level) _RELOADING = {} def reload(module): """Reload the module and return it. The module must have been successfully imported before. """ try: name = module.__spec__.name except AttributeError: try: name = module.__name__ except AttributeError: raise TypeError("reload() argument must be a module") if sys.modules.get(name) is not module: msg = "module {} not in sys.modules" raise ImportError(msg.format(name), name=name) if name in _RELOADING: return _RELOADING[name] _RELOADING[name] = module try: parent_name = name.rpartition('.')[0] if parent_name: try: parent = sys.modules[parent_name] except KeyError: msg = "parent {!r} not in sys.modules" raise ImportError(msg.format(parent_name), name=parent_name) from None else: pkgpath = parent.__path__ else: pkgpath = None target = module spec = module.__spec__ = _bootstrap._find_spec(name, pkgpath, target) if spec is None: raise ModuleNotFoundError(f"spec not found for the module {name!r}", name=name) _bootstrap._exec(spec, module) # The module may have replaced itself in sys.modules! return sys.modules[name] finally: try: del _RELOADING[name] except KeyError: pass usr/lib64/python3.6/concurrent/__init__.py000064400000000046151027317640014346 0ustar00# This directory is a Python package. 
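# --- Added usage sketch of the importlib public API defined above; not part
# --- of any packaged file. Only standard-library module names are used.

import importlib

json = importlib.import_module('json')                          # absolute import
decoder = importlib.import_module('.decoder', package='json')   # relative import
assert decoder is json.decoder
json = importlib.reload(json)        # re-executes the module and returns it
importlib.invalidate_caches()        # refresh finder caches after disk changes

# --- end of added sketch ---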
usr/lib64/python3.11/xml/parsers/__init__.py

"""Python interfaces to XML parsers.

This package contains one module:

expat -- Python wrapper for James Clark's Expat parser, with namespace
         support.

"""

usr/lib/python3.6/site-packages/isc/__init__.py

############################################################################
# Copyright (C) Internet Systems Consortium, Inc. ("ISC")
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0.  If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# See the COPYRIGHT file distributed with this work for additional
# information regarding copyright ownership.
############################################################################

__all__ = ['checkds', 'coverage', 'keymgr', 'dnskey', 'eventlist',
           'keydict', 'keyevent', 'keyseries', 'keyzone', 'policy',
           'parsetab', 'rndc', 'utils']

from isc.dnskey import *
from isc.eventlist import *
from isc.keydict import *
from isc.keyevent import *
from isc.keyseries import *
from isc.keyzone import *
from isc.policy import *
from isc.rndc import *
from isc.utils import *

usr/lib64/python3.6/site-packages/libdnf/__init__.py

from __future__ import absolute_import

# error needs to be imported first and with global visibility for its symbols,
# as it defines a python exception, which is a global variable and the other
# modules use the symbol.
import sys, os
sys.setdlopenflags(os.RTLD_NOW | os.RTLD_GLOBAL)
from . import error
sys.setdlopenflags(os.RTLD_NOW)

from . import common_types
from . import conf
from . import module
from . import repo
from . import transaction
from . import utils

usr/lib64/python3.6/xml/parsers/__init__.py

"""Python interfaces to XML parsers.

This package contains one module:

expat -- Python wrapper for James Clark's Expat parser, with namespace
         support.

"""

usr/lib64/python2.7/lib2to3/pgen2/__init__.py

# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""The pgen2 package."""

usr/lib64/python2.7/lib2to3/fixes/__init__.py

# Dummy file to make this directory a package.

usr/lib64/python2.7/xml/etree/__init__.py

# $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $
# elementtree package

# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
# # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. # -------------------------------------------------------------------- # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/psf/license for licensing details. usr/lib/python3.6/site-packages/bs4/__init__.py000064400000054530151027346140015226 0ustar00"""Beautiful Soup Elixir and Tonic "The Screen-Scraper's Friend" http://www.crummy.com/software/BeautifulSoup/ Beautiful Soup uses a pluggable XML or HTML parser to parse a (possibly invalid) document into a tree representation. Beautiful Soup provides methods and Pythonic idioms that make it easy to navigate, search, and modify the parse tree. Beautiful Soup works with Python 2.7 and up. It works better if lxml and/or html5lib is installed. For more than you ever wanted to know about Beautiful Soup, see the documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/ """ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. __author__ = "Leonard Richardson (leonardr@segfault.org)" __version__ = "4.6.3" __copyright__ = "Copyright (c) 2004-2018 Leonard Richardson" __license__ = "MIT" __all__ = ['BeautifulSoup'] import os import re import sys import traceback import warnings from .builder import builder_registry, ParserRejectedMarkup from .dammit import UnicodeDammit from .element import ( CData, Comment, DEFAULT_OUTPUT_ENCODING, Declaration, Doctype, NavigableString, PageElement, ProcessingInstruction, ResultSet, SoupStrainer, Tag, ) # The very first thing we do is give a useful error if someone is # running this code under Python 3 without converting it. 'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'!='You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).' class BeautifulSoup(Tag): """ This class defines the basic interface called by the tree builders. These methods will be called by the parser: reset() feed(markup) The tree builder may call these methods from its feed() implementation: handle_starttag(name, attrs) # See note about return value handle_endtag(name) handle_data(data) # Appends to the current data node endData(containerClass=NavigableString) # Ends the current data node No matter how complicated the underlying parser is, you should be able to build a tree using 'start tag' events, 'end tag' events, 'data' events, and "done with data" events. If you encounter an empty-element tag (aka a self-closing tag, like HTML's
<br>
tag), call handle_starttag and then handle_endtag. """ ROOT_TAG_NAME = '[document]' # If the end-user gives no indication which tree builder they # want, look for one with these features. DEFAULT_BUILDER_FEATURES = ['html', 'fast'] ASCII_SPACES = '\x20\x0a\x09\x0c\x0d' NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n" def __init__(self, markup="", features=None, builder=None, parse_only=None, from_encoding=None, exclude_encodings=None, **kwargs): """Constructor. :param markup: A string or a file-like object representing markup to be parsed. :param features: Desirable features of the parser to be used. This may be the name of a specific parser ("lxml", "lxml-xml", "html.parser", or "html5lib") or it may be the type of markup to be used ("html", "html5", "xml"). It's recommended that you name a specific parser, so that Beautiful Soup gives you the same results across platforms and virtual environments. :param builder: A specific TreeBuilder to use instead of looking one up based on `features`. You shouldn't need to use this. :param parse_only: A SoupStrainer. Only parts of the document matching the SoupStrainer will be considered. This is useful when parsing part of a document that would otherwise be too large to fit into memory. :param from_encoding: A string indicating the encoding of the document to be parsed. Pass this in if Beautiful Soup is guessing wrongly about the document's encoding. :param exclude_encodings: A list of strings indicating encodings known to be wrong. Pass this in if you don't know the document's encoding but you know Beautiful Soup's guess is wrong. :param kwargs: For backwards compatibility purposes, the constructor accepts certain keyword arguments used in Beautiful Soup 3. None of these arguments do anything in Beautiful Soup 4 and there's no need to actually pass keyword arguments into the constructor. """ if 'convertEntities' in kwargs: warnings.warn( "BS4 does not respect the convertEntities argument to the " "BeautifulSoup constructor. Entities are always converted " "to Unicode characters.") if 'markupMassage' in kwargs: del kwargs['markupMassage'] warnings.warn( "BS4 does not respect the markupMassage argument to the " "BeautifulSoup constructor. The tree builder is responsible " "for any necessary markup massage.") if 'smartQuotesTo' in kwargs: del kwargs['smartQuotesTo'] warnings.warn( "BS4 does not respect the smartQuotesTo argument to the " "BeautifulSoup constructor. Smart quotes are always converted " "to Unicode characters.") if 'selfClosingTags' in kwargs: del kwargs['selfClosingTags'] warnings.warn( "BS4 does not respect the selfClosingTags argument to the " "BeautifulSoup constructor. The tree builder is responsible " "for understanding self-closing tags.") if 'isHTML' in kwargs: del kwargs['isHTML'] warnings.warn( "BS4 does not respect the isHTML argument to the " "BeautifulSoup constructor. 
Suggest you use " "features='lxml' for HTML and features='lxml-xml' for " "XML.") def deprecated_argument(old_name, new_name): if old_name in kwargs: warnings.warn( 'The "%s" argument to the BeautifulSoup constructor ' 'has been renamed to "%s."' % (old_name, new_name)) value = kwargs[old_name] del kwargs[old_name] return value return None parse_only = parse_only or deprecated_argument( "parseOnlyThese", "parse_only") from_encoding = from_encoding or deprecated_argument( "fromEncoding", "from_encoding") if from_encoding and isinstance(markup, str): warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.") from_encoding = None if len(kwargs) > 0: arg = list(kwargs.keys()).pop() raise TypeError( "__init__() got an unexpected keyword argument '%s'" % arg) if builder is None: original_features = features if isinstance(features, str): features = [features] if features is None or len(features) == 0: features = self.DEFAULT_BUILDER_FEATURES builder_class = builder_registry.lookup(*features) if builder_class is None: raise FeatureNotFound( "Couldn't find a tree builder with the features you " "requested: %s. Do you need to install a parser library?" % ",".join(features)) builder = builder_class() if not (original_features == builder.NAME or original_features in builder.ALTERNATE_NAMES): if builder.is_xml: markup_type = "XML" else: markup_type = "HTML" # This code adapted from warnings.py so that we get the same line # of code as our warnings.warn() call gets, even if the answer is wrong # (as it may be in a multithreading situation). caller = None try: caller = sys._getframe(1) except ValueError: pass if caller: globals = caller.f_globals line_number = caller.f_lineno else: globals = sys.__dict__ line_number= 1 filename = globals.get('__file__') if filename: fnl = filename.lower() if fnl.endswith((".pyc", ".pyo")): filename = filename[:-1] if filename: # If there is no filename at all, the user is most likely in a REPL, # and the warning is not necessary. values = dict( filename=filename, line_number=line_number, parser=builder.NAME, markup_type=markup_type ) warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % values, stacklevel=2) self.builder = builder self.is_xml = builder.is_xml self.known_xml = self.is_xml self.builder.soup = self self.parse_only = parse_only if hasattr(markup, 'read'): # It's a file-type object. markup = markup.read() elif len(markup) <= 256 and ( (isinstance(markup, bytes) and not b'<' in markup) or (isinstance(markup, str) and not '<' in markup) ): # Print out warnings for a couple beginner problems # involving passing non-markup to Beautiful Soup. # Beautiful Soup will still parse the input as markup, # just in case that's what the user really wants. if (isinstance(markup, str) and not os.path.supports_unicode_filenames): possible_filename = markup.encode("utf8") else: possible_filename = markup is_file = False try: is_file = os.path.exists(possible_filename) except Exception as e: # This is almost certainly a problem involving # characters not valid in filenames on this # system. Just let it go. pass if is_file: if isinstance(markup, str): markup = markup.encode("utf8") warnings.warn( '"%s" looks like a filename, not markup. You should' ' probably open this file and pass the filehandle into' ' Beautiful Soup.' 
% markup) self._check_markup_is_url(markup) for (self.markup, self.original_encoding, self.declared_html_encoding, self.contains_replacement_characters) in ( self.builder.prepare_markup( markup, from_encoding, exclude_encodings=exclude_encodings)): self.reset() try: self._feed() break except ParserRejectedMarkup: pass # Clear out the markup and remove the builder's circular # reference to this object. self.markup = None self.builder.soup = None def __copy__(self): copy = type(self)( self.encode('utf-8'), builder=self.builder, from_encoding='utf-8' ) # Although we encoded the tree to UTF-8, that may not have # been the encoding of the original markup. Set the copy's # .original_encoding to reflect the original object's # .original_encoding. copy.original_encoding = self.original_encoding return copy def __getstate__(self): # Frequently a tree builder can't be pickled. d = dict(self.__dict__) if 'builder' in d and not self.builder.picklable: d['builder'] = None return d @staticmethod def _check_markup_is_url(markup): """ Check if markup looks like it's actually a url and raise a warning if so. Markup can be unicode or str (py2) / bytes (py3). """ if isinstance(markup, bytes): space = b' ' cant_start_with = (b"http:", b"https:") elif isinstance(markup, str): space = ' ' cant_start_with = ("http:", "https:") else: return if any(markup.startswith(prefix) for prefix in cant_start_with): if not space in markup: if isinstance(markup, bytes): decoded_markup = markup.decode('utf-8', 'replace') else: decoded_markup = markup warnings.warn( '"%s" looks like a URL. Beautiful Soup is not an' ' HTTP client. You should probably use an HTTP client like' ' requests to get the document behind the URL, and feed' ' that document to Beautiful Soup.' % decoded_markup ) def _feed(self): # Convert the document to Unicode. self.builder.reset() self.builder.feed(self.markup) # Close out any unfinished strings and close all the open tags. 
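        # (Editor's note) endData() below flushes any still-buffered text into
        # a NavigableString; the while loop then pops every tag a sloppy
        # document left open, until only the [document] root remains.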
self.endData() while self.currentTag.name != self.ROOT_TAG_NAME: self.popTag() def reset(self): Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) self.hidden = 1 self.builder.reset() self.current_data = [] self.currentTag = None self.tagStack = [] self.preserve_whitespace_tag_stack = [] self.pushTag(self) def new_tag(self, name, namespace=None, nsprefix=None, attrs={}, **kwattrs): """Create a new tag associated with this soup.""" kwattrs.update(attrs) return Tag(None, self.builder, name, namespace, nsprefix, kwattrs) def new_string(self, s, subclass=NavigableString): """Create a new NavigableString associated with this soup.""" return subclass(s) def insert_before(self, successor): raise NotImplementedError("BeautifulSoup objects don't support insert_before().") def insert_after(self, successor): raise NotImplementedError("BeautifulSoup objects don't support insert_after().") def popTag(self): tag = self.tagStack.pop() if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]: self.preserve_whitespace_tag_stack.pop() #print "Pop", tag.name if self.tagStack: self.currentTag = self.tagStack[-1] return self.currentTag def pushTag(self, tag): #print "Push", tag.name if self.currentTag: self.currentTag.contents.append(tag) self.tagStack.append(tag) self.currentTag = self.tagStack[-1] if tag.name in self.builder.preserve_whitespace_tags: self.preserve_whitespace_tag_stack.append(tag) def endData(self, containerClass=NavigableString): if self.current_data: current_data = ''.join(self.current_data) # If whitespace is not preserved, and this string contains # nothing but ASCII spaces, replace it with a single space # or newline. if not self.preserve_whitespace_tag_stack: strippable = True for i in current_data: if i not in self.ASCII_SPACES: strippable = False break if strippable: if '\n' in current_data: current_data = '\n' else: current_data = ' ' # Reset the data collector. self.current_data = [] # Should we add this string to the tree at all? if self.parse_only and len(self.tagStack) <= 1 and \ (not self.parse_only.text or \ not self.parse_only.search(current_data)): return o = containerClass(current_data) self.object_was_parsed(o) def object_was_parsed(self, o, parent=None, most_recent_element=None): """Add an object to the parse tree.""" parent = parent or self.currentTag previous_element = most_recent_element or self._most_recent_element next_element = previous_sibling = next_sibling = None if isinstance(o, Tag): next_element = o.next_element next_sibling = o.next_sibling previous_sibling = o.previous_sibling if not previous_element: previous_element = o.previous_element o.setup(parent, previous_element, next_element, previous_sibling, next_sibling) self._most_recent_element = o parent.contents.append(o) if parent.next_sibling: # This node is being inserted into an element that has # already been parsed. Deal with any dangling references. index = len(parent.contents)-1 while index >= 0: if parent.contents[index] is o: break index -= 1 else: raise ValueError( "Error building tree: supposedly %r was inserted " "into %r after the fact, but I don't see it!" 
                        % (o, parent))
            if index == 0:
                previous_element = parent
                previous_sibling = None
            else:
                previous_element = previous_sibling = parent.contents[index-1]
            if index == len(parent.contents)-1:
                next_element = parent.next_sibling
                next_sibling = None
            else:
                next_element = next_sibling = parent.contents[index+1]
            o.previous_element = previous_element
            if previous_element:
                previous_element.next_element = o
            o.next_element = next_element
            if next_element:
                next_element.previous_element = o
            o.next_sibling = next_sibling
            if next_sibling:
                next_sibling.previous_sibling = o
            o.previous_sibling = previous_sibling
            if previous_sibling:
                previous_sibling.next_sibling = o

    def _popToTag(self, name, nsprefix=None, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of the
        given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # The BeautifulSoup object itself can never be popped.
            return

        most_recently_popped = None

        stack_size = len(self.tagStack)
        for i in range(stack_size - 1, 0, -1):
            t = self.tagStack[i]
            if (name == t.name and nsprefix == t.prefix):
                if inclusivePop:
                    most_recently_popped = self.popTag()
                break
            most_recently_popped = self.popTag()

        return most_recently_popped

    def handle_starttag(self, name, namespace, nsprefix, attrs):
        """Push a start tag on to the stack.

        If this method returns None, the tag was rejected by the
        SoupStrainer. You should proceed as if the tag had not occurred
        in the document. For instance, if this was a self-closing tag,
        don't call handle_endtag.
        """
        # print "Start tag %s: %s" % (name, attrs)
        self.endData()

        if (self.parse_only and len(self.tagStack) <= 1
            and (self.parse_only.text
                 or not self.parse_only.search_tag(name, attrs))):
            return None

        tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
                  self.currentTag, self._most_recent_element)
        if tag is None:
            return tag
        if self._most_recent_element:
            self._most_recent_element.next_element = tag
        self._most_recent_element = tag
        self.pushTag(tag)
        return tag

    def handle_endtag(self, name, nsprefix=None):
        #print "End tag: " + name
        self.endData()
        self._popToTag(name, nsprefix)

    def handle_data(self, data):
        self.current_data.append(data)

    def decode(self, pretty_print=False,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               formatter="minimal"):
        """Returns a string or Unicode representation of this document.
        To get Unicode, pass None for encoding."""

        if self.is_xml:
            # Print the XML declaration
            encoding_part = ''
            if eventual_encoding != None:
                encoding_part = ' encoding="%s"' % eventual_encoding
            prefix = '<?xml version="1.0"%s?>\n' % encoding_part
        else:
            prefix = ''
        if not pretty_print:
            indent_level = None
        else:
            indent_level = 0
        return prefix + super(BeautifulSoup, self).decode(
            indent_level, eventual_encoding, formatter)

# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup

class BeautifulStoneSoup(BeautifulSoup):
    """Deprecated interface to an XML parser."""

    def __init__(self, *args, **kwargs):
        kwargs['features'] = 'xml'
        warnings.warn(
            'The BeautifulStoneSoup class is deprecated. Instead of using '
            'it, pass features="xml" into the BeautifulSoup constructor.')
        super(BeautifulStoneSoup, self).__init__(*args, **kwargs)

class StopParsing(Exception):
    pass

class FeatureNotFound(ValueError):
    pass

#By default, act as an HTML pretty-printer.
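# (Editor's note) A minimal usage sketch, assuming bs4 and a parser are
# importable (html.parser always ships with Python); kept commented out so
# it never runs on import:
#
#     from bs4 import BeautifulSoup
#     soup = BeautifulSoup("<p>Hello <b>world</b></p>", "html.parser")
#     print(soup.p.b.string)   # -> world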
if __name__ == '__main__': import sys soup = BeautifulSoup(sys.stdin) print((soup.prettify())) usr/lib64/python3.11/email/__init__.py000064400000003344151027346640013335 0ustar00# Copyright (C) 2001-2007 Python Software Foundation # Author: Barry Warsaw # Contact: email-sig@python.org """A package for parsing, handling, and generating email messages.""" __all__ = [ 'base64mime', 'charset', 'encoders', 'errors', 'feedparser', 'generator', 'header', 'iterators', 'message', 'message_from_file', 'message_from_binary_file', 'message_from_string', 'message_from_bytes', 'mime', 'parser', 'quoprimime', 'utils', ] # Some convenience routines. Don't import Parser and Message as side-effects # of importing email since those cascadingly import most of the rest of the # email package. def message_from_string(s, *args, **kws): """Parse a string into a Message object model. Optional _class and strict are passed to the Parser constructor. """ from email.parser import Parser return Parser(*args, **kws).parsestr(s) def message_from_bytes(s, *args, **kws): """Parse a bytes string into a Message object model. Optional _class and strict are passed to the Parser constructor. """ from email.parser import BytesParser return BytesParser(*args, **kws).parsebytes(s) def message_from_file(fp, *args, **kws): """Read a file and parse its contents into a Message object model. Optional _class and strict are passed to the Parser constructor. """ from email.parser import Parser return Parser(*args, **kws).parse(fp) def message_from_binary_file(fp, *args, **kws): """Read a binary file and parse its contents into a Message object model. Optional _class and strict are passed to the Parser constructor. """ from email.parser import BytesParser return BytesParser(*args, **kws).parse(fp) usr/lib64/python3.6/importlib/__init__.py000064400000013356151027347410014173 0ustar00"""A pure Python implementation of import.""" __all__ = ['__import__', 'import_module', 'invalidate_caches', 'reload'] # Bootstrap help ##################################################### # Until bootstrapping is complete, DO NOT import any modules that attempt # to import importlib._bootstrap (directly or indirectly). Since this # partially initialised package would be present in sys.modules, those # modules would get an uninitialised copy of the source version, instead # of a fully initialised version (either the frozen one or the one # initialised below if the frozen one is not available). import _imp # Just the builtin component, NOT the full Python module import sys try: import _frozen_importlib as _bootstrap except ImportError: from . import _bootstrap _bootstrap._setup(sys, _imp) else: # importlib._bootstrap is the built-in import, ensure we don't create # a second copy of the module. _bootstrap.__name__ = 'importlib._bootstrap' _bootstrap.__package__ = 'importlib' try: _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py') except NameError: # __file__ is not guaranteed to be defined, e.g. if this code gets # frozen by a tool like cx_Freeze. pass sys.modules['importlib._bootstrap'] = _bootstrap try: import _frozen_importlib_external as _bootstrap_external except ImportError: from . 
import _bootstrap_external _bootstrap_external._setup(_bootstrap) _bootstrap._bootstrap_external = _bootstrap_external else: _bootstrap_external.__name__ = 'importlib._bootstrap_external' _bootstrap_external.__package__ = 'importlib' try: _bootstrap_external.__file__ = __file__.replace('__init__.py', '_bootstrap_external.py') except NameError: # __file__ is not guaranteed to be defined, e.g. if this code gets # frozen by a tool like cx_Freeze. pass sys.modules['importlib._bootstrap_external'] = _bootstrap_external # To simplify imports in test code _w_long = _bootstrap_external._w_long _r_long = _bootstrap_external._r_long # Fully bootstrapped at this point, import whatever you like, circular # dependencies and startup overhead minimisation permitting :) import types import warnings # Public API ######################################################### from ._bootstrap import __import__ def invalidate_caches(): """Call the invalidate_caches() method on all meta path finders stored in sys.meta_path (where implemented).""" for finder in sys.meta_path: if hasattr(finder, 'invalidate_caches'): finder.invalidate_caches() def find_loader(name, path=None): """Return the loader for the specified module. This is a backward-compatible wrapper around find_spec(). This function is deprecated in favor of importlib.util.find_spec(). """ warnings.warn('Use importlib.util.find_spec() instead.', DeprecationWarning, stacklevel=2) try: loader = sys.modules[name].__loader__ if loader is None: raise ValueError('{}.__loader__ is None'.format(name)) else: return loader except KeyError: pass except AttributeError: raise ValueError('{}.__loader__ is not set'.format(name)) from None spec = _bootstrap._find_spec(name, path) # We won't worry about malformed specs (missing attributes). if spec is None: return None if spec.loader is None: if spec.submodule_search_locations is None: raise ImportError('spec for {} missing loader'.format(name), name=name) raise ImportError('namespace packages do not have loaders', name=name) return spec.loader def import_module(name, package=None): """Import a module. The 'package' argument is required when performing a relative import. It specifies the package to use as the anchor point from which to resolve the relative import to an absolute import. """ level = 0 if name.startswith('.'): if not package: msg = ("the 'package' argument is required to perform a relative " "import for {!r}") raise TypeError(msg.format(name)) for character in name: if character != '.': break level += 1 return _bootstrap._gcd_import(name[level:], package, level) _RELOADING = {} def reload(module): """Reload the module and return it. The module must have been successfully imported before. 
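    For example (illustrative addition): the target must already be imported
    and still be the object stored in sys.modules:

        import importlib, json
        json = importlib.reload(json)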
""" if not module or not isinstance(module, types.ModuleType): raise TypeError("reload() argument must be a module") try: name = module.__spec__.name except AttributeError: name = module.__name__ if sys.modules.get(name) is not module: msg = "module {} not in sys.modules" raise ImportError(msg.format(name), name=name) if name in _RELOADING: return _RELOADING[name] _RELOADING[name] = module try: parent_name = name.rpartition('.')[0] if parent_name: try: parent = sys.modules[parent_name] except KeyError: msg = "parent {!r} not in sys.modules" raise ImportError(msg.format(parent_name), name=parent_name) from None else: pkgpath = parent.__path__ else: pkgpath = None target = module spec = module.__spec__ = _bootstrap._find_spec(name, pkgpath, target) _bootstrap._exec(spec, module) # The module may have replaced itself in sys.modules! return sys.modules[name] finally: try: del _RELOADING[name] except KeyError: pass usr/lib64/python2.7/logging/__init__.py000064400000170342151027350030013607 0ustar00# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose and without fee is hereby granted, # provided that the above copyright notice appear in all copies and that # both that copyright notice and this permission notice appear in # supporting documentation, and that the name of Vinay Sajip # not be used in advertising or publicity pertaining to distribution # of the software without specific, written prior permission. # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ Logging package for Python. Based on PEP 282 and comments thereto in comp.lang.python. Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved. To use, simply 'import logging' and log away! """ import sys, os, time, cStringIO, traceback, warnings, weakref, collections __all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR', 'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO', 'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler', 'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig', 'captureWarnings', 'critical', 'debug', 'disable', 'error', 'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass', 'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning'] try: import codecs except ImportError: codecs = None try: import thread import threading except ImportError: thread = None __author__ = "Vinay Sajip " __status__ = "production" # Note: the attributes below are no longer maintained. 
__version__ = "0.5.1.2" __date__ = "07 February 2010" #--------------------------------------------------------------------------- # Miscellaneous module data #--------------------------------------------------------------------------- try: unicode _unicode = True except NameError: _unicode = False # next bit filched from 1.5.2's inspect.py def currentframe(): """Return the frame object for the caller's stack frame.""" try: raise Exception except: return sys.exc_info()[2].tb_frame.f_back if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3) # done filching # # _srcfile is used when walking the stack to check when we've got the first # caller stack frame. # _srcfile = os.path.normcase(currentframe.__code__.co_filename) # _srcfile is only used in conjunction with sys._getframe(). # To provide compatibility with older versions of Python, set _srcfile # to None if _getframe() is not available; this value will prevent # findCaller() from being called. #if not hasattr(sys, "_getframe"): # _srcfile = None # #_startTime is used as the base when calculating the relative time of events # _startTime = time.time() # #raiseExceptions is used to see if exceptions during handling should be #propagated # raiseExceptions = 1 # # If you don't want threading information in the log, set this to zero # logThreads = 1 # # If you don't want multiprocessing information in the log, set this to zero # logMultiprocessing = 1 # # If you don't want process information in the log, set this to zero # logProcesses = 1 #--------------------------------------------------------------------------- # Level related stuff #--------------------------------------------------------------------------- # # Default levels and level names, these can be replaced with any positive set # of values having corresponding names. There is a pseudo-level, NOTSET, which # is only really there as a lower limit for user-defined levels. Handlers and # loggers are initialized with NOTSET so that they will log all messages, even # at user-defined levels. # CRITICAL = 50 FATAL = CRITICAL ERROR = 40 WARNING = 30 WARN = WARNING INFO = 20 DEBUG = 10 NOTSET = 0 _levelNames = { CRITICAL : 'CRITICAL', ERROR : 'ERROR', WARNING : 'WARNING', INFO : 'INFO', DEBUG : 'DEBUG', NOTSET : 'NOTSET', 'CRITICAL' : CRITICAL, 'ERROR' : ERROR, 'WARN' : WARNING, 'WARNING' : WARNING, 'INFO' : INFO, 'DEBUG' : DEBUG, 'NOTSET' : NOTSET, } def getLevelName(level): """ Return the textual representation of logging level 'level'. If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, INFO, DEBUG) then you get the corresponding string. If you have associated levels with names using addLevelName then the name you have associated with 'level' is returned. If a numeric value corresponding to one of the defined levels is passed in, the corresponding string representation is returned. Otherwise, the string "Level %s" % level is returned. """ return _levelNames.get(level, ("Level %s" % level)) def addLevelName(level, levelName): """ Associate 'levelName' with 'level'. This is used when converting levels to text during message formatting. """ _acquireLock() try: #unlikely to cause an exception, but you never know... 
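        # (Editor's note) The mapping is stored in both directions, so that
        # getLevelName() can turn a number into a name or a name back into
        # a number, e.g. after logging.addLevelName(25, "NOTICE").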
_levelNames[level] = levelName _levelNames[levelName] = level finally: _releaseLock() def _checkLevel(level): if isinstance(level, (int, long)): rv = level elif str(level) == level: if level not in _levelNames: raise ValueError("Unknown level: %r" % level) rv = _levelNames[level] else: raise TypeError("Level not an integer or a valid string: %r" % level) return rv #--------------------------------------------------------------------------- # Thread-related stuff #--------------------------------------------------------------------------- # #_lock is used to serialize access to shared data structures in this module. #This needs to be an RLock because fileConfig() creates and configures #Handlers, and so might arbitrary user threads. Since Handler code updates the #shared dictionary _handlers, it needs to acquire the lock. But if configuring, #the lock would already have been acquired - so we need an RLock. #The same argument applies to Loggers and Manager.loggerDict. # if thread: _lock = threading.RLock() else: _lock = None def _acquireLock(): """ Acquire the module-level lock for serializing access to shared data. This should be released with _releaseLock(). """ if _lock: _lock.acquire() def _releaseLock(): """ Release the module-level lock acquired by calling _acquireLock(). """ if _lock: _lock.release() #--------------------------------------------------------------------------- # The logging record #--------------------------------------------------------------------------- class LogRecord(object): """ A LogRecord instance represents an event being logged. LogRecord instances are created every time something is logged. They contain all the information pertinent to the event being logged. The main information passed in is in msg and args, which are combined using str(msg) % args to create the message field of the record. The record also includes information such as when the record was created, the source line where the logging call was made, and any exception information to be logged. """ def __init__(self, name, level, pathname, lineno, msg, args, exc_info, func=None): """ Initialize a logging record with interesting information. """ ct = time.time() self.name = name self.msg = msg # # The following statement allows passing of a dictionary as a sole # argument, so that you can do something like # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) # Suggested by Stefan Behnel. # Note that without the test for args[0], we get a problem because # during formatting, we test to see if the arg is present using # 'if self.args:'. If the event being logged is e.g. 'Value is %d' # and if the passed arg fails 'if self.args:' then no formatting # is done. For example, logger.warn('Value is %d', 0) would log # 'Value is %d' instead of 'Value is 0'. # For the use case of passing a dictionary, this should not be a # problem. # Issue #21172: a request was made to relax the isinstance check # to hasattr(args[0], '__getitem__'). However, the docs on string # formatting still seem to suggest a mapping object is required. # Thus, while not removing the isinstance check, it does now look # for collections.Mapping rather than, as before, dict. 
        if (args and len(args) == 1 and isinstance(args[0], collections.Mapping)
                and args[0]):
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None      # used to cache the traceback text
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        self.msecs = (ct - long(ct)) * 1000
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads and thread:
            self.thread = thread.get_ident()
            self.threadName = threading.current_thread().name
        else:
            self.thread = None
            self.threadName = None
        if not logMultiprocessing:
            self.processName = None
        else:
            self.processName = 'MainProcess'
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except StandardError:
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None

    def __str__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)

    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        if not _unicode: #if no unicode support...
            msg = str(self.msg)
        else:
            msg = self.msg
            if not isinstance(msg, basestring):
                try:
                    msg = str(self.msg)
                except UnicodeError:
                    msg = self.msg      #Defer encoding till later
        if self.args:
            msg = msg % self.args
        return msg

def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary.
    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    rv = LogRecord(None, None, "", 0, "", (), None, None)
    rv.__dict__.update(dict)
    return rv

#---------------------------------------------------------------------------
#   Formatter classes and functions
#---------------------------------------------------------------------------

class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.

    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute.
Currently, the useful attributes in a LogRecord are described by: %(name)s Name of the logger (logging channel) %(levelno)s Numeric logging level for the message (DEBUG, INFO, WARNING, ERROR, CRITICAL) %(levelname)s Text logging level for the message ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL") %(pathname)s Full pathname of the source file where the logging call was issued (if available) %(filename)s Filename portion of pathname %(module)s Module (name portion of filename) %(lineno)d Source line number where the logging call was issued (if available) %(funcName)s Function name %(created)f Time when the LogRecord was created (time.time() return value) %(asctime)s Textual time when the LogRecord was created %(msecs)d Millisecond portion of the creation time %(relativeCreated)d Time in milliseconds when the LogRecord was created, relative to the time the logging module was loaded (typically at application startup time) %(thread)d Thread ID (if available) %(threadName)s Thread name (if available) %(process)d Process ID (if available) %(message)s The result of record.getMessage(), computed just as the record is emitted """ converter = time.localtime def __init__(self, fmt=None, datefmt=None): """ Initialize the formatter with specified format strings. Initialize the formatter either with the specified format string, or a default as described above. Allow for specialized date formatting with the optional datefmt argument (if omitted, you get the ISO8601 format). """ if fmt: self._fmt = fmt else: self._fmt = "%(message)s" self.datefmt = datefmt def formatTime(self, record, datefmt=None): """ Return the creation time of the specified LogRecord as formatted text. This method should be called from format() by a formatter which wants to make use of a formatted time. This method can be overridden in formatters to provide for any specific requirement, but the basic behaviour is as follows: if datefmt (a string) is specified, it is used with time.strftime() to format the creation time of the record. Otherwise, the ISO8601 format is used. The resulting string is returned. This function uses a user-configurable function to convert the creation time to a tuple. By default, time.localtime() is used; to change this for a particular formatter instance, set the 'converter' attribute to a function with the same signature as time.localtime() or time.gmtime(). To change it for all formatters, for example if you want all logging times to be shown in GMT, set the 'converter' attribute in the Formatter class. """ ct = self.converter(record.created) if datefmt: s = time.strftime(datefmt, ct) else: t = time.strftime("%Y-%m-%d %H:%M:%S", ct) s = "%s,%03d" % (t, record.msecs) return s def formatException(self, ei): """ Format and return the specified exception information as a string. This default implementation just uses traceback.print_exception() """ sio = cStringIO.StringIO() traceback.print_exception(ei[0], ei[1], ei[2], None, sio) s = sio.getvalue() sio.close() if s[-1:] == "\n": s = s[:-1] return s def usesTime(self): """ Check if the format uses the creation time of the record. """ return self._fmt.find("%(asctime)") >= 0 def format(self, record): """ Format the specified record as text. The record's attribute dictionary is used as the operand to a string formatting operation which yields the returned string. Before formatting the dictionary, a couple of preparatory steps are carried out. The message attribute of the record is computed using LogRecord.getMessage(). 
If the formatting string uses the time (as determined by a call to usesTime(), formatTime() is called to format the event time. If there is exception information, it is formatted using formatException() and appended to the message. """ record.message = record.getMessage() if self.usesTime(): record.asctime = self.formatTime(record, self.datefmt) try: s = self._fmt % record.__dict__ except UnicodeDecodeError as e: # Issue 25664. The logger name may be Unicode. Try again ... try: record.name = record.name.decode('utf-8') s = self._fmt % record.__dict__ except UnicodeDecodeError: raise e if record.exc_info: # Cache the traceback text to avoid converting it multiple times # (it's constant anyway) if not record.exc_text: record.exc_text = self.formatException(record.exc_info) if record.exc_text: if s[-1:] != "\n": s = s + "\n" try: s = s + record.exc_text except UnicodeError: # Sometimes filenames have non-ASCII chars, which can lead # to errors when s is Unicode and record.exc_text is str # See issue 8924. # We also use replace for when there are multiple # encodings, e.g. UTF-8 for the filesystem and latin-1 # for a script. See issue 13232. s = s + record.exc_text.decode(sys.getfilesystemencoding(), 'replace') return s # # The default formatter to use when no other is specified # _defaultFormatter = Formatter() class BufferingFormatter(object): """ A formatter suitable for formatting a number of records. """ def __init__(self, linefmt=None): """ Optionally specify a formatter which will be used to format each individual record. """ if linefmt: self.linefmt = linefmt else: self.linefmt = _defaultFormatter def formatHeader(self, records): """ Return the header string for the specified records. """ return "" def formatFooter(self, records): """ Return the footer string for the specified records. """ return "" def format(self, records): """ Format the specified records and return the result as a string. """ rv = "" if len(records) > 0: rv = rv + self.formatHeader(records) for record in records: rv = rv + self.linefmt.format(record) rv = rv + self.formatFooter(records) return rv #--------------------------------------------------------------------------- # Filter classes and functions #--------------------------------------------------------------------------- class Filter(object): """ Filter instances are used to perform arbitrary filtering of LogRecords. Loggers and Handlers can optionally use Filter instances to filter records as desired. The base filter class only allows events which are below a certain point in the logger hierarchy. For example, a filter initialized with "A.B" will allow events logged by loggers "A.B", "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If initialized with the empty string, all events are passed. """ def __init__(self, name=''): """ Initialize a filter. Initialize with the name of the logger which, together with its children, will have its events allowed through the filter. If no name is specified, allow every event. """ self.name = name self.nlen = len(name) def filter(self, record): """ Determine if the specified record is to be logged. Is the specified record to be logged? Returns 0 for no, nonzero for yes. If deemed appropriate, the record may be modified in-place. """ if self.nlen == 0: return 1 elif self.name == record.name: return 1 elif record.name.find(self.name, 0, self.nlen) != 0: return 0 return (record.name[self.nlen] == ".") class Filterer(object): """ A base class for loggers and handlers which allows them to share common code. 
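    For example (illustrative addition): both Logger and Handler derive from
    this class, so logger.addFilter(f) and handler.addFilter(f) share the
    filter bookkeeping implemented below.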
""" def __init__(self): """ Initialize the list of filters to be an empty list. """ self.filters = [] def addFilter(self, filter): """ Add the specified filter to this handler. """ if not (filter in self.filters): self.filters.append(filter) def removeFilter(self, filter): """ Remove the specified filter from this handler. """ if filter in self.filters: self.filters.remove(filter) def filter(self, record): """ Determine if a record is loggable by consulting all the filters. The default is to allow the record to be logged; any filter can veto this and the record is then dropped. Returns a zero value if a record is to be dropped, else non-zero. """ rv = 1 for f in self.filters: if not f.filter(record): rv = 0 break return rv #--------------------------------------------------------------------------- # Handler classes and functions #--------------------------------------------------------------------------- _handlers = weakref.WeakValueDictionary() #map of handler names to handlers _handlerList = [] # added to allow handlers to be removed in reverse of order initialized def _removeHandlerRef(wr): """ Remove a handler reference from the internal cleanup list. """ # This function can be called during module teardown, when globals are # set to None. It can also be called from another thread. So we need to # pre-emptively grab the necessary globals and check if they're None, # to prevent race conditions and failures during interpreter shutdown. acquire, release, handlers = _acquireLock, _releaseLock, _handlerList if acquire and release and handlers: try: acquire() try: if wr in handlers: handlers.remove(wr) finally: release() except TypeError: # https://bugs.python.org/issue21149 - If the RLock object behind # acquire() and release() has been partially finalized you may see # an error about NoneType not being callable. Absolutely nothing # we can do in this GC during process shutdown situation. Eat it. pass def _addHandlerRef(handler): """ Add a handler to the internal cleanup list using a weak reference. """ _acquireLock() try: _handlerList.append(weakref.ref(handler, _removeHandlerRef)) finally: _releaseLock() class Handler(Filterer): """ Handler instances dispatch logging events to specific destinations. The base handler class. Acts as a placeholder which defines the Handler interface. Handlers can optionally use Formatter instances to format records as desired. By default, no formatter is specified; in this case, the 'raw' message as determined by record.message is logged. """ def __init__(self, level=NOTSET): """ Initializes the instance - basically setting the formatter to None and the filter list to empty. """ Filterer.__init__(self) self._name = None self.level = _checkLevel(level) self.formatter = None # Add the handler to the global _handlerList (for cleanup on shutdown) _addHandlerRef(self) self.createLock() def get_name(self): return self._name def set_name(self, name): _acquireLock() try: if self._name in _handlers: del _handlers[self._name] self._name = name if name: _handlers[name] = self finally: _releaseLock() name = property(get_name, set_name) def createLock(self): """ Acquire a thread lock for serializing access to the underlying I/O. """ if thread: self.lock = threading.RLock() else: self.lock = None def acquire(self): """ Acquire the I/O thread lock. """ if self.lock: self.lock.acquire() def release(self): """ Release the I/O thread lock. """ if self.lock: self.lock.release() def setLevel(self, level): """ Set the logging level of this handler. 
""" self.level = _checkLevel(level) def format(self, record): """ Format the specified record. If a formatter is set, use it. Otherwise, use the default formatter for the module. """ if self.formatter: fmt = self.formatter else: fmt = _defaultFormatter return fmt.format(record) def emit(self, record): """ Do whatever it takes to actually log the specified logging record. This version is intended to be implemented by subclasses and so raises a NotImplementedError. """ raise NotImplementedError('emit must be implemented ' 'by Handler subclasses') def handle(self, record): """ Conditionally emit the specified logging record. Emission depends on filters which may have been added to the handler. Wrap the actual emission of the record with acquisition/release of the I/O thread lock. Returns whether the filter passed the record for emission. """ rv = self.filter(record) if rv: self.acquire() try: self.emit(record) finally: self.release() return rv def setFormatter(self, fmt): """ Set the formatter for this handler. """ self.formatter = fmt def flush(self): """ Ensure all logging output has been flushed. This version does nothing and is intended to be implemented by subclasses. """ pass def close(self): """ Tidy up any resources used by the handler. This version removes the handler from an internal map of handlers, _handlers, which is used for handler lookup by name. Subclasses should ensure that this gets called from overridden close() methods. """ #get the module data lock, as we're updating a shared structure. _acquireLock() try: #unlikely to raise an exception, but you never know... if self._name and self._name in _handlers: del _handlers[self._name] finally: _releaseLock() def handleError(self, record): """ Handle errors which occur during an emit() call. This method should be called from handlers when an exception is encountered during an emit() call. If raiseExceptions is false, exceptions get silently ignored. This is what is mostly wanted for a logging system - most users will not care about errors in the logging system, they are more interested in application errors. You could, however, replace this with a custom handler if you wish. The record which was being processed is passed in to this method. """ if raiseExceptions and sys.stderr: # see issue 13807 ei = sys.exc_info() try: traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) sys.stderr.write('Logged from file %s, line %s\n' % ( record.filename, record.lineno)) except IOError: pass # see issue 5971 finally: del ei class StreamHandler(Handler): """ A handler class which writes logging records, appropriately formatted, to a stream. Note that this class does not close the stream, as sys.stdout or sys.stderr may be used. """ def __init__(self, stream=None): """ Initialize the handler. If stream is not specified, sys.stderr is used. """ Handler.__init__(self) if stream is None: stream = sys.stderr self.stream = stream def flush(self): """ Flushes the stream. """ self.acquire() try: if self.stream and hasattr(self.stream, "flush"): self.stream.flush() finally: self.release() def emit(self, record): """ Emit a record. If a formatter is specified, it is used to format the record. The record is then written to the stream with a trailing newline. If exception information is present, it is formatted using traceback.print_exception and appended to the stream. If the stream has an 'encoding' attribute, it is used to determine how to do the output to the stream. 
""" try: msg = self.format(record) stream = self.stream fs = "%s\n" if not _unicode: #if no unicode support... stream.write(fs % msg) else: try: if (isinstance(msg, unicode) and getattr(stream, 'encoding', None)): ufs = u'%s\n' try: stream.write(ufs % msg) except UnicodeEncodeError: #Printing to terminals sometimes fails. For example, #with an encoding of 'cp1251', the above write will #work if written to a stream opened or wrapped by #the codecs module, but fail when writing to a #terminal even when the codepage is set to cp1251. #An extra encoding step seems to be needed. stream.write((ufs % msg).encode(stream.encoding)) else: stream.write(fs % msg) except UnicodeError: stream.write(fs % msg.encode("UTF-8")) self.flush() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class FileHandler(StreamHandler): """ A handler class which writes formatted logging records to disk files. """ def __init__(self, filename, mode='a', encoding=None, delay=0): """ Open the specified file and use it as the stream for logging. """ #keep the absolute path, otherwise derived classes which use this #may come a cropper when the current directory changes if codecs is None: encoding = None self.baseFilename = os.path.abspath(filename) self.mode = mode self.encoding = encoding self.delay = delay if delay: #We don't open the stream, but we still need to call the #Handler constructor to set level, formatter, lock etc. Handler.__init__(self) self.stream = None else: StreamHandler.__init__(self, self._open()) def close(self): """ Closes the stream. """ self.acquire() try: try: if self.stream: try: self.flush() finally: stream = self.stream self.stream = None if hasattr(stream, "close"): stream.close() finally: # Issue #19523: call unconditionally to # prevent a handler leak when delay is set StreamHandler.close(self) finally: self.release() def _open(self): """ Open the current base file with the (original) mode and encoding. Return the resulting stream. """ if self.encoding is None: stream = open(self.baseFilename, self.mode) else: stream = codecs.open(self.baseFilename, self.mode, self.encoding) return stream def emit(self, record): """ Emit a record. If the stream was not opened because 'delay' was specified in the constructor, open it before calling the superclass's emit. """ if self.stream is None: self.stream = self._open() StreamHandler.emit(self, record) #--------------------------------------------------------------------------- # Manager classes and functions #--------------------------------------------------------------------------- class PlaceHolder(object): """ PlaceHolder instances are used in the Manager logger hierarchy to take the place of nodes for which no loggers have been defined. This class is intended for internal use only and not as part of the public API. """ def __init__(self, alogger): """ Initialize with the specified logger being a child of this placeholder. """ #self.loggers = [alogger] self.loggerMap = { alogger : None } def append(self, alogger): """ Add the specified logger as a child of this placeholder. """ #if alogger not in self.loggers: if alogger not in self.loggerMap: #self.loggers.append(alogger) self.loggerMap[alogger] = None # # Determine which class to use when instantiating loggers. # _loggerClass = None def setLoggerClass(klass): """ Set the class to be used when instantiating a logger. 
The class should define __init__() such that only a name argument is required, and the __init__() should call Logger.__init__() """ if klass != Logger: if not issubclass(klass, Logger): raise TypeError("logger not derived from logging.Logger: " + klass.__name__) global _loggerClass _loggerClass = klass def getLoggerClass(): """ Return the class to be used when instantiating a logger. """ return _loggerClass class Manager(object): """ There is [under normal circumstances] just one Manager instance, which holds the hierarchy of loggers. """ def __init__(self, rootnode): """ Initialize the manager with the root node of the logger hierarchy. """ self.root = rootnode self.disable = 0 self.emittedNoHandlerWarning = 0 self.loggerDict = {} self.loggerClass = None def getLogger(self, name): """ Get a logger with the specified name (channel name), creating it if it doesn't yet exist. This name is a dot-separated hierarchical name, such as "a", "a.b", "a.b.c" or similar. If a PlaceHolder existed for the specified name [i.e. the logger didn't exist but a child of it did], replace it with the created logger and fix up the parent/child references which pointed to the placeholder to now point to the logger. """ rv = None if not isinstance(name, basestring): raise TypeError('A logger name must be string or Unicode') if isinstance(name, unicode): name = name.encode('utf-8') _acquireLock() try: if name in self.loggerDict: rv = self.loggerDict[name] if isinstance(rv, PlaceHolder): ph = rv rv = (self.loggerClass or _loggerClass)(name) rv.manager = self self.loggerDict[name] = rv self._fixupChildren(ph, rv) self._fixupParents(rv) else: rv = (self.loggerClass or _loggerClass)(name) rv.manager = self self.loggerDict[name] = rv self._fixupParents(rv) finally: _releaseLock() return rv def setLoggerClass(self, klass): """ Set the class to be used when instantiating a logger with this Manager. """ if klass != Logger: if not issubclass(klass, Logger): raise TypeError("logger not derived from logging.Logger: " + klass.__name__) self.loggerClass = klass def _fixupParents(self, alogger): """ Ensure that there are either loggers or placeholders all the way from the specified logger to the root of the logger hierarchy. """ name = alogger.name i = name.rfind(".") rv = None while (i > 0) and not rv: substr = name[:i] if substr not in self.loggerDict: self.loggerDict[substr] = PlaceHolder(alogger) else: obj = self.loggerDict[substr] if isinstance(obj, Logger): rv = obj else: assert isinstance(obj, PlaceHolder) obj.append(alogger) i = name.rfind(".", 0, i - 1) if not rv: rv = self.root alogger.parent = rv def _fixupChildren(self, ph, alogger): """ Ensure that children of the placeholder ph are connected to the specified logger. """ name = alogger.name namelen = len(name) for c in ph.loggerMap.keys(): #The if means ... if not c.parent.name.startswith(nm) if c.parent.name[:namelen] != name: alogger.parent = c.parent c.parent = alogger #--------------------------------------------------------------------------- # Logger classes and functions #--------------------------------------------------------------------------- class Logger(Filterer): """ Instances of the Logger class represent a single logging channel. A "logging channel" indicates an area of an application. Exactly how an "area" is defined is up to the application developer. Since an application can have any number of areas, logging channels are identified by a unique string. Application areas can be nested (e.g. 
an area of "input processing" might include sub-areas "read CSV files", "read XLS files" and "read Gnumeric files"). To cater for this natural nesting, channel names are organized into a namespace hierarchy where levels are separated by periods, much like the Java or Python package namespace. So in the instance given above, channel names might be "input" for the upper level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. There is no arbitrary limit to the depth of nesting. """ def __init__(self, name, level=NOTSET): """ Initialize the logger with a name and an optional level. """ Filterer.__init__(self) self.name = name self.level = _checkLevel(level) self.parent = None self.propagate = 1 self.handlers = [] self.disabled = 0 def setLevel(self, level): """ Set the logging level of this logger. """ self.level = _checkLevel(level) def debug(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'DEBUG'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) """ if self.isEnabledFor(DEBUG): self._log(DEBUG, msg, args, **kwargs) def info(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'INFO'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.info("Houston, we have a %s", "interesting problem", exc_info=1) """ if self.isEnabledFor(INFO): self._log(INFO, msg, args, **kwargs) def warning(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'WARNING'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) """ if self.isEnabledFor(WARNING): self._log(WARNING, msg, args, **kwargs) warn = warning def error(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'ERROR'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.error("Houston, we have a %s", "major problem", exc_info=1) """ if self.isEnabledFor(ERROR): self._log(ERROR, msg, args, **kwargs) def exception(self, msg, *args, **kwargs): """ Convenience method for logging an ERROR with exception information. """ kwargs['exc_info'] = 1 self.error(msg, *args, **kwargs) def critical(self, msg, *args, **kwargs): """ Log 'msg % args' with severity 'CRITICAL'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.critical("Houston, we have a %s", "major disaster", exc_info=1) """ if self.isEnabledFor(CRITICAL): self._log(CRITICAL, msg, args, **kwargs) fatal = critical def log(self, level, msg, *args, **kwargs): """ Log 'msg % args' with the integer severity 'level'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.log(level, "We have a %s", "mysterious problem", exc_info=1) """ if not isinstance(level, (int, long)): if raiseExceptions: raise TypeError("level must be an integer") else: return if self.isEnabledFor(level): self._log(level, msg, args, **kwargs) def findCaller(self): """ Find the stack frame of the caller so that we can note the source file name, line number and function name. """ f = currentframe() #On some versions of IronPython, currentframe() returns None if #IronPython isn't run with -X:Frames. 
if f is not None: f = f.f_back rv = "(unknown file)", 0, "(unknown function)" while hasattr(f, "f_code"): co = f.f_code filename = os.path.normcase(co.co_filename) if filename == _srcfile: f = f.f_back continue rv = (co.co_filename, f.f_lineno, co.co_name) break return rv def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None): """ A factory method which can be overridden in subclasses to create specialized LogRecords. """ rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func) if extra is not None: for key in extra: if (key in ["message", "asctime"]) or (key in rv.__dict__): raise KeyError("Attempt to overwrite %r in LogRecord" % key) rv.__dict__[key] = extra[key] return rv def _log(self, level, msg, args, exc_info=None, extra=None): """ Low-level logging routine which creates a LogRecord and then calls all the handlers of this logger to handle the record. """ if _srcfile: #IronPython doesn't track Python frames, so findCaller raises an #exception on some versions of IronPython. We trap it here so that #IronPython can use logging. try: fn, lno, func = self.findCaller() except ValueError: fn, lno, func = "(unknown file)", 0, "(unknown function)" else: fn, lno, func = "(unknown file)", 0, "(unknown function)" if exc_info: if not isinstance(exc_info, tuple): exc_info = sys.exc_info() record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra) self.handle(record) def handle(self, record): """ Call the handlers for the specified record. This method is used for unpickled records received from a socket, as well as those created locally. Logger-level filtering is applied. """ if (not self.disabled) and self.filter(record): self.callHandlers(record) def addHandler(self, hdlr): """ Add the specified handler to this logger. """ _acquireLock() try: if not (hdlr in self.handlers): self.handlers.append(hdlr) finally: _releaseLock() def removeHandler(self, hdlr): """ Remove the specified handler from this logger. """ _acquireLock() try: if hdlr in self.handlers: self.handlers.remove(hdlr) finally: _releaseLock() def callHandlers(self, record): """ Pass a record to all relevant handlers. Loop through all handlers for this logger and its parents in the logger hierarchy. If no handler was found, output a one-off error message to sys.stderr. Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger whose handlers are called. """ c = self found = 0 while c: for hdlr in c.handlers: found = found + 1 if record.levelno >= hdlr.level: hdlr.handle(record) if not c.propagate: c = None #break out else: c = c.parent if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning: sys.stderr.write("No handlers could be found for logger" " \"%s\"\n" % self.name) self.manager.emittedNoHandlerWarning = 1 def getEffectiveLevel(self): """ Get the effective level for this logger. Loop through this logger and its parents in the logger hierarchy, looking for a non-zero logging level. Return the first one found. """ logger = self while logger: if logger.level: return logger.level logger = logger.parent return NOTSET def isEnabledFor(self, level): """ Is this logger enabled for level 'level'? """ if self.manager.disable >= level: return 0 return level >= self.getEffectiveLevel() def getChild(self, suffix): """ Get a logger which is a descendant to this one. 
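# --- Usage sketch (added for illustration) of getEffectiveLevel() and
# callHandlers() above: a NOTSET child inherits the first non-zero level
# found walking toward the root, and propagate = 0 stops the handler
# search at that logger.
import logging

parent = logging.getLogger("svc")
parent.setLevel(logging.ERROR)
child = logging.getLogger("svc.worker")
print(child.getEffectiveLevel() == logging.ERROR)   # True: inherited
print(child.isEnabledFor(logging.WARNING))          # False
child.propagate = 0   # records stop bubbling to parent/root handlers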
This is a convenience method, such that logging.getLogger('abc').getChild('def.ghi') is the same as logging.getLogger('abc.def.ghi') It's useful, for example, when the parent logger is named using __name__ rather than a literal string. """ if self.root is not self: suffix = '.'.join((self.name, suffix)) return self.manager.getLogger(suffix) class RootLogger(Logger): """ A root logger is not that different to any other logger, except that it must have a logging level and there is only one instance of it in the hierarchy. """ def __init__(self, level): """ Initialize the logger with the name "root". """ Logger.__init__(self, "root", level) _loggerClass = Logger class LoggerAdapter(object): """ An adapter for loggers which makes it easier to specify contextual information in logging output. """ def __init__(self, logger, extra): """ Initialize the adapter with a logger and a dict-like object which provides contextual information. This constructor signature allows easy stacking of LoggerAdapters, if so desired. You can effectively pass keyword arguments as shown in the following example: adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) """ self.logger = logger self.extra = extra def process(self, msg, kwargs): """ Process the logging message and keyword arguments passed in to a logging call to insert contextual information. You can either manipulate the message itself, the keyword args or both. Return the message and kwargs modified (or not) to suit your needs. Normally, you'll only need to override this one method in a LoggerAdapter subclass for your specific needs. """ kwargs["extra"] = self.extra return msg, kwargs def debug(self, msg, *args, **kwargs): """ Delegate a debug call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.debug(msg, *args, **kwargs) def info(self, msg, *args, **kwargs): """ Delegate an info call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.info(msg, *args, **kwargs) def warning(self, msg, *args, **kwargs): """ Delegate a warning call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.warning(msg, *args, **kwargs) def error(self, msg, *args, **kwargs): """ Delegate an error call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.error(msg, *args, **kwargs) def exception(self, msg, *args, **kwargs): """ Delegate an exception call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) kwargs["exc_info"] = 1 self.logger.error(msg, *args, **kwargs) def critical(self, msg, *args, **kwargs): """ Delegate a critical call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.critical(msg, *args, **kwargs) def log(self, level, msg, *args, **kwargs): """ Delegate a log call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) def isEnabledFor(self, level): """ See if the underlying logger is enabled for the specified level. 
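# --- Usage sketch (added for illustration). A LoggerAdapter subclass
# only needs to override process(), which every delegate method above
# funnels through; the connection id is a made-up example.
import logging

class ConnAdapter(logging.LoggerAdapter):
    def process(self, msg, kwargs):
        kwargs["extra"] = self.extra
        return "[conn %s] %s" % (self.extra["conn_id"], msg), kwargs

logging.basicConfig(format="%(message)s")
adapter = ConnAdapter(logging.getLogger("net"), {"conn_id": 42})
adapter.warning("handshake retried")   # -> "[conn 42] handshake retried"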
""" return self.logger.isEnabledFor(level) root = RootLogger(WARNING) Logger.root = root Logger.manager = Manager(Logger.root) #--------------------------------------------------------------------------- # Configuration classes and functions #--------------------------------------------------------------------------- BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" def basicConfig(**kwargs): """ Do basic configuration for the logging system. This function does nothing if the root logger already has handlers configured. It is a convenience method intended for use by simple scripts to do one-shot configuration of the logging package. The default behaviour is to create a StreamHandler which writes to sys.stderr, set a formatter using the BASIC_FORMAT format string, and add the handler to the root logger. A number of optional keyword arguments may be specified, which can alter the default behaviour. filename Specifies that a FileHandler be created, using the specified filename, rather than a StreamHandler. filemode Specifies the mode to open the file, if filename is specified (if filemode is unspecified, it defaults to 'a'). format Use the specified format string for the handler. datefmt Use the specified date/time format. level Set the root logger level to the specified level. stream Use the specified stream to initialize the StreamHandler. Note that this argument is incompatible with 'filename' - if both are present, 'stream' is ignored. Note that you could specify a stream created using open(filename, mode) rather than passing the filename and mode in. However, it should be remembered that StreamHandler does not close its stream (since it may be using sys.stdout or sys.stderr), whereas FileHandler closes its stream when the handler is closed. """ # Add thread safety in case someone mistakenly calls # basicConfig() from multiple threads _acquireLock() try: if len(root.handlers) == 0: filename = kwargs.get("filename") if filename: mode = kwargs.get("filemode", 'a') hdlr = FileHandler(filename, mode) else: stream = kwargs.get("stream") hdlr = StreamHandler(stream) fs = kwargs.get("format", BASIC_FORMAT) dfs = kwargs.get("datefmt", None) fmt = Formatter(fs, dfs) hdlr.setFormatter(fmt) root.addHandler(hdlr) level = kwargs.get("level") if level is not None: root.setLevel(level) finally: _releaseLock() #--------------------------------------------------------------------------- # Utility functions at module level. # Basically delegate everything to the root logger. #--------------------------------------------------------------------------- def getLogger(name=None): """ Return a logger with the specified name, creating it if necessary. If no name is specified, return the root logger. """ if name: return Logger.manager.getLogger(name) else: return root #def getRootLogger(): # """ # Return the root logger. # # Note that getLogger('') now does the same thing, so this function is # deprecated and may disappear in the future. # """ # return root def critical(msg, *args, **kwargs): """ Log a message with severity 'CRITICAL' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.critical(msg, *args, **kwargs) fatal = critical def error(msg, *args, **kwargs): """ Log a message with severity 'ERROR' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.error(msg, *args, **kwargs) def exception(msg, *args, **kwargs): """ Log a message with severity 'ERROR' on the root logger, with exception information. 
""" kwargs['exc_info'] = 1 error(msg, *args, **kwargs) def warning(msg, *args, **kwargs): """ Log a message with severity 'WARNING' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.warning(msg, *args, **kwargs) warn = warning def info(msg, *args, **kwargs): """ Log a message with severity 'INFO' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.info(msg, *args, **kwargs) def debug(msg, *args, **kwargs): """ Log a message with severity 'DEBUG' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.debug(msg, *args, **kwargs) def log(level, msg, *args, **kwargs): """ Log 'msg % args' with the integer severity 'level' on the root logger. """ if len(root.handlers) == 0: basicConfig() root.log(level, msg, *args, **kwargs) def disable(level): """ Disable all logging calls of severity 'level' and below. """ root.manager.disable = level def shutdown(handlerList=_handlerList): """ Perform any cleanup actions in the logging system (e.g. flushing buffers). Should be called at application exit. """ for wr in reversed(handlerList[:]): #errors might occur, for example, if files are locked #we just ignore them if raiseExceptions is not set try: h = wr() if h: try: h.acquire() h.flush() h.close() except (IOError, ValueError): # Ignore errors which might be caused # because handlers have been closed but # references to them are still around at # application exit. pass finally: h.release() except: if raiseExceptions: raise #else, swallow #Let's try and shutdown automatically on application exit... import atexit atexit.register(shutdown) # Null handler class NullHandler(Handler): """ This handler does nothing. It's intended to be used to avoid the "No handlers could be found for logger XXX" one-off warning. This is important for library code, which may contain code to log events. If a user of the library does not configure logging, the one-off warning might be produced; to avoid this, the library developer simply needs to instantiate a NullHandler and add it to the top-level logger of the library module or package. """ def handle(self, record): pass def emit(self, record): pass def createLock(self): self.lock = None # Warnings integration _warnings_showwarning = None def _showwarning(message, category, filename, lineno, file=None, line=None): """ Implementation of showwarnings which redirects to logging, which will first check to see if the file parameter is None. If a file is specified, it will delegate to the original warnings implementation of showwarning. Otherwise, it will call warnings.formatwarning and will log the resulting string to a warnings logger named "py.warnings" with level logging.WARNING. """ if file is not None: if _warnings_showwarning is not None: _warnings_showwarning(message, category, filename, lineno, file, line) else: s = warnings.formatwarning(message, category, filename, lineno, line) logger = getLogger("py.warnings") if not logger.handlers: logger.addHandler(NullHandler()) logger.warning("%s", s) def captureWarnings(capture): """ If capture is true, redirect all warnings to the logging package. If capture is False, ensure that warnings are not redirected to logging but to their original destinations. 
""" global _warnings_showwarning if capture: if _warnings_showwarning is None: _warnings_showwarning = warnings.showwarning warnings.showwarning = _showwarning else: if _warnings_showwarning is not None: warnings.showwarning = _warnings_showwarning _warnings_showwarning = None usr/lib64/python3.6/collections/__init__.py000064400000131364151027350410014502 0ustar00'''This module implements specialized container datatypes providing alternatives to Python's general purpose built-in containers, dict, list, set, and tuple. * namedtuple factory function for creating tuple subclasses with named fields * deque list-like container with fast appends and pops on either end * ChainMap dict-like class for creating a single view of multiple mappings * Counter dict subclass for counting hashable objects * OrderedDict dict subclass that remembers the order entries were added * defaultdict dict subclass that calls a factory function to supply missing values * UserDict wrapper around dictionary objects for easier dict subclassing * UserList wrapper around list objects for easier list subclassing * UserString wrapper around string objects for easier string subclassing ''' __all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', 'UserString', 'Counter', 'OrderedDict', 'ChainMap'] # For backwards compatibility, continue to make the collections ABCs # available through the collections module. from _collections_abc import * import _collections_abc __all__ += _collections_abc.__all__ from operator import itemgetter as _itemgetter, eq as _eq from keyword import iskeyword as _iskeyword import sys as _sys import heapq as _heapq from _weakref import proxy as _proxy from itertools import repeat as _repeat, chain as _chain, starmap as _starmap from reprlib import recursive_repr as _recursive_repr try: from _collections import deque except ImportError: pass else: MutableSequence.register(deque) try: from _collections import defaultdict except ImportError: pass ################################################################################ ### OrderedDict ################################################################################ class _OrderedDictKeysView(KeysView): def __reversed__(self): yield from reversed(self._mapping) class _OrderedDictItemsView(ItemsView): def __reversed__(self): for key in reversed(self._mapping): yield (key, self._mapping[key]) class _OrderedDictValuesView(ValuesView): def __reversed__(self): for key in reversed(self._mapping): yield self._mapping[key] class _Link(object): __slots__ = 'prev', 'next', 'key', '__weakref__' class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as regular dictionaries. # The internal self.__map dict maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # The sentinel is in self.__hardroot with a weakref proxy in self.__root. # The prev links are weakref proxies (to prevent circular references). # Individual links are kept alive by the hard reference in self.__map. # Those hard references disappear when a key is deleted from an OrderedDict. def __init__(*args, **kwds): '''Initialize an ordered dictionary. The signature is the same as regular dictionaries. 
Keyword argument order is preserved. ''' if not args: raise TypeError("descriptor '__init__' of 'OrderedDict' object " "needs an argument") self, *args = args if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__hardroot = _Link() self.__root = root = _proxy(self.__hardroot) root.prev = root.next = root self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link at the end of the linked list, # and the inherited dictionary is updated with the new key/value pair. if key not in self: self.__map[key] = link = Link() root = self.__root last = root.prev link.prev, link.next, link.key = last, root, key last.next = link root.prev = proxy(link) dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which gets # removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link = self.__map.pop(key) link_prev = link.prev link_next = link.next link_prev.next = link_next link_next.prev = link_prev link.prev = None link.next = None def __iter__(self): 'od.__iter__() <==> iter(od)' # Traverse the linked list in order. root = self.__root curr = root.next while curr is not root: yield curr.key curr = curr.next def __reversed__(self): 'od.__reversed__() <==> reversed(od)' # Traverse the linked list in reverse order. root = self.__root curr = root.prev while curr is not root: yield curr.key curr = curr.prev def clear(self): 'od.clear() -> None. Remove all items from od.' root = self.__root root.prev = root.next = root self.__map.clear() dict.clear(self) def popitem(self, last=True): '''Remove and return a (key, value) pair from the dictionary. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root.prev link_prev = link.prev link_prev.next = root root.prev = link_prev else: link = root.next link_next = link.next root.next = link_next link_next.prev = root key = link.key del self.__map[key] value = dict.pop(self, key) return key, value def move_to_end(self, key, last=True): '''Move an existing element to the end (or beginning if last==False). Raises KeyError if the element does not exist. When last=True, acts like a fast version of self[key]=self.pop(key). 
''' link = self.__map[key] link_prev = link.prev link_next = link.next soft_link = link_next.prev link_prev.next = link_next link_next.prev = link_prev root = self.__root if last: last = root.prev link.prev = last link.next = root root.prev = soft_link last.next = link else: first = root.next link.prev = root link.next = first first.prev = soft_link root.next = link def __sizeof__(self): sizeof = _sys.getsizeof n = len(self) + 1 # number of links including root size = sizeof(self.__dict__) # instance dictionary size += sizeof(self.__map) * 2 # internal dict and inherited dict size += sizeof(self.__hardroot) * n # link objects size += sizeof(self.__root) * n # proxy objects return size update = __update = MutableMapping.update def keys(self): "D.keys() -> a set-like object providing a view on D's keys" return _OrderedDictKeysView(self) def items(self): "D.items() -> a set-like object providing a view on D's items" return _OrderedDictItemsView(self) def values(self): "D.values() -> an object providing a view on D's values" return _OrderedDictValuesView(self) __ne__ = MutableMapping.__ne__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default @_recursive_repr() def __repr__(self): 'od.__repr__() <==> repr(od)' if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self.items())) def __reduce__(self): 'Return state information for pickling' inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) return self.__class__, (), inst_dict or None, None, iter(self.items()) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. If not specified, the value defaults to None. ''' self = cls() for key in iterable: self[key] = value return self def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return dict.__eq__(self, other) and all(map(_eq, self, other)) return dict.__eq__(self, other) try: from _collections import OrderedDict except ImportError: # Leave the pure Python version in place. 
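# --- Usage sketch (added for illustration) of the linked-list
# operations above: move_to_end() relinks a node in O(1) and
# popitem(last=False) pops in FIFO order.
from collections import OrderedDict

od = OrderedDict([("a", 1), ("b", 2), ("c", 3)])
od.move_to_end("a")                # "a" moves to the end
print(list(od))                    # ['b', 'c', 'a']
od.move_to_end("c", last=False)    # "c" moves to the front
print(od.popitem(last=False))      # ('c', 3): FIFO pop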
pass ################################################################################ ### namedtuple ################################################################################ _class_template = """\ from builtins import property as _property, tuple as _tuple from operator import itemgetter as _itemgetter from collections import OrderedDict class {typename}(tuple): '{typename}({arg_list})' __slots__ = () _fields = {field_names!r} def __new__(_cls, {arg_list}): 'Create new instance of {typename}({arg_list})' return _tuple.__new__(_cls, ({arg_list})) @classmethod def _make(cls, iterable, new=tuple.__new__, len=len): 'Make a new {typename} object from a sequence or iterable' result = new(cls, iterable) if len(result) != {num_fields:d}: raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result)) return result def _replace(_self, **kwds): 'Return a new {typename} object replacing specified fields with new values' result = _self._make(map(kwds.pop, {field_names!r}, _self)) if kwds: raise ValueError('Got unexpected field names: %r' % list(kwds)) return result def __repr__(self): 'Return a nicely formatted representation string' return self.__class__.__name__ + '({repr_fmt})' % self def _asdict(self): 'Return a new OrderedDict which maps field names to their values.' return OrderedDict(zip(self._fields, self)) def __getnewargs__(self): 'Return self as a plain tuple. Used by copy and pickle.' return tuple(self) {field_defs} """ _repr_template = '{name}=%r' _field_template = '''\ {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}') ''' def namedtuple(typename, field_names, *, verbose=False, rename=False, module=None): """Returns a new subclass of tuple with named fields. >>> Point = namedtuple('Point', ['x', 'y']) >>> Point.__doc__ # docstring for the new class 'Point(x, y)' >>> p = Point(11, y=22) # instantiate with positional args or keywords >>> p[0] + p[1] # indexable like a plain tuple 33 >>> x, y = p # unpack like a regular tuple >>> x, y (11, 22) >>> p.x + p.y # fields also accessible by name 33 >>> d = p._asdict() # convert to a dictionary >>> d['x'] 11 >>> Point(**d) # convert from a dictionary Point(x=11, y=22) >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields Point(x=100, y=22) """ # Validate the field names. At the user's option, either generate an error # message or automatically replace the field name with a valid name. 
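# --- Usage sketch (added for illustration) of the rename option
# validated just below: keyword and duplicate field names are replaced
# with positional _N names instead of raising.
from collections import namedtuple

Row = namedtuple("Row", ["id", "class", "id"], rename=True)
print(Row._fields)     # ('id', '_1', '_2')
print(Row(1, 2, 3))    # Row(id=1, _1=2, _2=3)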
if isinstance(field_names, str): field_names = field_names.replace(',', ' ').split() field_names = list(map(str, field_names)) typename = str(typename) if rename: seen = set() for index, name in enumerate(field_names): if (not name.isidentifier() or _iskeyword(name) or name.startswith('_') or name in seen): field_names[index] = '_%d' % index seen.add(name) for name in [typename] + field_names: if type(name) is not str: raise TypeError('Type names and field names must be strings') if not name.isidentifier(): raise ValueError('Type names and field names must be valid ' 'identifiers: %r' % name) if _iskeyword(name): raise ValueError('Type names and field names cannot be a ' 'keyword: %r' % name) seen = set() for name in field_names: if name.startswith('_') and not rename: raise ValueError('Field names cannot start with an underscore: ' '%r' % name) if name in seen: raise ValueError('Encountered duplicate field name: %r' % name) seen.add(name) # Fill-in the class template class_definition = _class_template.format( typename = typename, field_names = tuple(field_names), num_fields = len(field_names), arg_list = repr(tuple(field_names)).replace("'", "")[1:-1], repr_fmt = ', '.join(_repr_template.format(name=name) for name in field_names), field_defs = '\n'.join(_field_template.format(index=index, name=name) for index, name in enumerate(field_names)) ) # Execute the template string in a temporary namespace and support # tracing utilities by setting a value for frame.f_globals['__name__'] namespace = dict(__name__='namedtuple_%s' % typename) exec(class_definition, namespace) result = namespace[typename] result._source = class_definition if verbose: print(result._source) # For pickling to work, the __module__ variable needs to be set to the frame # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython), or where the user has # specified a particular module. if module is None: try: module = _sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass if module is not None: result.__module__ = module return result ######################################################################## ### Counter ######################################################################## def _count_elements(mapping, iterable): 'Tally elements from the iterable.' mapping_get = mapping.get for elem in iterable: mapping[elem] = mapping_get(elem, 0) + 1 try: # Load C helper function if available from _collections import _count_elements except ImportError: pass class Counter(dict): '''Dict subclass for counting hashable items. Sometimes called a bag or multiset. Elements are stored as dictionary keys and their counts are stored as dictionary values. >>> c = Counter('abcdeabcdabcaba') # count elements from a string >>> c.most_common(3) # three most common elements [('a', 5), ('b', 4), ('c', 3)] >>> sorted(c) # list all unique elements ['a', 'b', 'c', 'd', 'e'] >>> ''.join(sorted(c.elements())) # list elements with repetitions 'aaaaabbbbcccdde' >>> sum(c.values()) # total of all counts 15 >>> c['a'] # count of letter 'a' 5 >>> for elem in 'shazam': # update counts from an iterable ... 
c[elem] += 1 # by adding 1 to each element's count >>> c['a'] # now there are seven 'a' 7 >>> del c['b'] # remove all 'b' >>> c['b'] # now there are zero 'b' 0 >>> d = Counter('simsalabim') # make another counter >>> c.update(d) # add in the second counter >>> c['a'] # now there are nine 'a' 9 >>> c.clear() # empty the counter >>> c Counter() Note: If a count is set to zero or reduced to zero, it will remain in the counter until the entry is deleted or the counter is cleared: >>> c = Counter('aaabbc') >>> c['b'] -= 2 # reduce the count of 'b' by two >>> c.most_common() # 'b' is still in, but its count is zero [('a', 3), ('c', 1), ('b', 0)] ''' # References: # http://en.wikipedia.org/wiki/Multiset # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm # http://code.activestate.com/recipes/259174/ # Knuth, TAOCP Vol. II section 4.6.3 def __init__(*args, **kwds): '''Create a new, empty Counter object. And if given, count elements from an input iterable. Or, initialize the count from another mapping of elements to their counts. >>> c = Counter() # a new, empty counter >>> c = Counter('gallahad') # a new counter from an iterable >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping >>> c = Counter(a=4, b=2) # a new counter from keyword args ''' if not args: raise TypeError("descriptor '__init__' of 'Counter' object " "needs an argument") self, *args = args if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) super(Counter, self).__init__() self.update(*args, **kwds) def __missing__(self, key): 'The count of elements not in the Counter is zero.' # Needed so that self[missing_item] does not raise KeyError return 0 def most_common(self, n=None): '''List the n most common elements and their counts from the most common to the least. If n is None, then list all element counts. >>> Counter('abcdeabcdabcaba').most_common(3) [('a', 5), ('b', 4), ('c', 3)] ''' # Emulate Bag.sortedByCount from Smalltalk if n is None: return sorted(self.items(), key=_itemgetter(1), reverse=True) return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) def elements(self): '''Iterator over elements repeating each as many times as its count. >>> c = Counter('ABCABC') >>> sorted(c.elements()) ['A', 'A', 'B', 'B', 'C', 'C'] # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) >>> product = 1 >>> for factor in prime_factors.elements(): # loop over factors ... product *= factor # and multiply them >>> product 1836 Note, if an element's count has been set to zero or is a negative number, elements() will ignore it. ''' # Emulate Bag.do from Smalltalk and Multiset.begin from C++. return _chain.from_iterable(_starmap(_repeat, self.items())) # Override dict methods where necessary @classmethod def fromkeys(cls, iterable, v=None): # There is no equivalent method for counters because setting v=1 # means that no element can have a count greater than one. raise NotImplementedError( 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') def update(*args, **kwds): '''Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. 
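# --- Usage sketch (added for illustration) for update()/subtract()
# here and the operator methods that follow: binary operators drop
# non-positive counts, while update()/subtract() keep zeros and
# negatives.
from collections import Counter

c = Counter(a=3, b=1)
d = Counter(a=1, b=2)
print(c + d)      # Counter({'a': 4, 'b': 3})
print(c - d)      # Counter({'a': 2}): 'b' went non-positive
c.subtract(d)
print(c["b"])     # -1: subtract() keeps negative counts
print(+c)         # Counter({'a': 2}): unary + strips them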
>>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4 ''' # The regular dict.update() operation makes no sense here because the # replace behavior results in the some of original untouched counts # being mixed-in with all of the other counts for a mismash that # doesn't have a straight-forward interpretation in most counting # contexts. Instead, we implement straight-addition. Both the inputs # and outputs are allowed to contain zero and negative counts. if not args: raise TypeError("descriptor 'update' of 'Counter' object " "needs an argument") self, *args = args if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) iterable = args[0] if args else None if iterable is not None: if isinstance(iterable, Mapping): if self: self_get = self.get for elem, count in iterable.items(): self[elem] = count + self_get(elem, 0) else: super(Counter, self).update(iterable) # fast path when counter is empty else: _count_elements(self, iterable) if kwds: self.update(kwds) def subtract(*args, **kwds): '''Like dict.update() but subtracts counts instead of replacing them. Counts can be reduced below zero. Both the inputs and outputs are allowed to contain zero and negative counts. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.subtract('witch') # subtract elements from another iterable >>> c.subtract(Counter('watch')) # subtract elements from another counter >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch 0 >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch -1 ''' if not args: raise TypeError("descriptor 'subtract' of 'Counter' object " "needs an argument") self, *args = args if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) iterable = args[0] if args else None if iterable is not None: self_get = self.get if isinstance(iterable, Mapping): for elem, count in iterable.items(): self[elem] = self_get(elem, 0) - count else: for elem in iterable: self[elem] = self_get(elem, 0) - 1 if kwds: self.subtract(kwds) def copy(self): 'Return a shallow copy.' return self.__class__(self) def __reduce__(self): return self.__class__, (dict(self),) def __delitem__(self, elem): 'Like dict.__delitem__() but does not raise KeyError for missing values.' if elem in self: super().__delitem__(elem) def __repr__(self): if not self: return '%s()' % self.__class__.__name__ try: items = ', '.join(map('%r: %r'.__mod__, self.most_common())) return '%s({%s})' % (self.__class__.__name__, items) except TypeError: # handle case where values are not orderable return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) # Multiset-style mathematical operations discussed in: # Knuth TAOCP Volume II section 4.6.3 exercise 19 # and at http://en.wikipedia.org/wiki/Multiset # # Outputs guaranteed to only include positive counts. # # To strip negative and zero counts, add-in an empty counter: # c += Counter() def __add__(self, other): '''Add counts from two counters. 
>>> Counter('abbb') + Counter('bcc') Counter({'b': 4, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): newcount = count + other[elem] if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count > 0: result[elem] = count return result def __sub__(self, other): ''' Subtract count, but keep only results with positive counts. >>> Counter('abbbc') - Counter('bccd') Counter({'b': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): newcount = count - other[elem] if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count < 0: result[elem] = 0 - count return result def __or__(self, other): '''Union is the maximum of value in either of the input counters. >>> Counter('abbb') | Counter('bcc') Counter({'b': 3, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): other_count = other[elem] newcount = other_count if count < other_count else count if newcount > 0: result[elem] = newcount for elem, count in other.items(): if elem not in self and count > 0: result[elem] = count return result def __and__(self, other): ''' Intersection is the minimum of corresponding counts. >>> Counter('abbb') & Counter('bcc') Counter({'b': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem, count in self.items(): other_count = other[elem] newcount = count if count < other_count else other_count if newcount > 0: result[elem] = newcount return result def __pos__(self): 'Adds an empty counter, effectively stripping negative and zero counts' result = Counter() for elem, count in self.items(): if count > 0: result[elem] = count return result def __neg__(self): '''Subtracts from an empty counter. Strips positive and zero counts, and flips the sign on negative counts. ''' result = Counter() for elem, count in self.items(): if count < 0: result[elem] = 0 - count return result def _keep_positive(self): '''Internal method to strip elements with a negative or zero count''' nonpositive = [elem for elem, count in self.items() if not count > 0] for elem in nonpositive: del self[elem] return self def __iadd__(self, other): '''Inplace add from another counter, keeping only positive counts. >>> c = Counter('abbb') >>> c += Counter('bcc') >>> c Counter({'b': 4, 'c': 2, 'a': 1}) ''' for elem, count in other.items(): self[elem] += count return self._keep_positive() def __isub__(self, other): '''Inplace subtract counter, but keep only results with positive counts. >>> c = Counter('abbbc') >>> c -= Counter('bccd') >>> c Counter({'b': 2, 'a': 1}) ''' for elem, count in other.items(): self[elem] -= count return self._keep_positive() def __ior__(self, other): '''Inplace union is the maximum of value from either counter. >>> c = Counter('abbb') >>> c |= Counter('bcc') >>> c Counter({'b': 3, 'c': 2, 'a': 1}) ''' for elem, other_count in other.items(): count = self[elem] if other_count > count: self[elem] = other_count return self._keep_positive() def __iand__(self, other): '''Inplace intersection is the minimum of corresponding counts. 
>>> c = Counter('abbb') >>> c &= Counter('bcc') >>> c Counter({'b': 1}) ''' for elem, count in self.items(): other_count = other[elem] if other_count < count: self[elem] = other_count return self._keep_positive() ######################################################################## ### ChainMap ######################################################################## class ChainMap(MutableMapping): ''' A ChainMap groups multiple dicts (or other mappings) together to create a single, updateable view. The underlying mappings are stored in a list. That list is public and can be accessed or updated using the *maps* attribute. There is no other state. Lookups search the underlying mappings successively until a key is found. In contrast, writes, updates, and deletions only operate on the first mapping. ''' def __init__(self, *maps): '''Initialize a ChainMap by setting *maps* to the given mappings. If no mappings are provided, a single empty dictionary is used. ''' self.maps = list(maps) or [{}] # always at least one map def __missing__(self, key): raise KeyError(key) def __getitem__(self, key): for mapping in self.maps: try: return mapping[key] # can't use 'key in mapping' with defaultdict except KeyError: pass return self.__missing__(key) # support subclasses that define __missing__ def get(self, key, default=None): return self[key] if key in self else default def __len__(self): return len(set().union(*self.maps)) # reuses stored hash values if possible def __iter__(self): return iter(set().union(*self.maps)) def __contains__(self, key): return any(key in m for m in self.maps) def __bool__(self): return any(self.maps) @_recursive_repr() def __repr__(self): return '{0.__class__.__name__}({1})'.format( self, ', '.join(map(repr, self.maps))) @classmethod def fromkeys(cls, iterable, *args): 'Create a ChainMap with a single dict created from the iterable.' return cls(dict.fromkeys(iterable, *args)) def copy(self): 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' return self.__class__(self.maps[0].copy(), *self.maps[1:]) __copy__ = copy def new_child(self, m=None): # like Django's Context.push() '''New ChainMap with a new map followed by all previous maps. If no map is provided, an empty dict is used. ''' if m is None: m = {} return self.__class__(m, *self.maps) @property def parents(self): # like Django's Context.pop() 'New ChainMap from maps[1:].' return self.__class__(*self.maps[1:]) def __setitem__(self, key, value): self.maps[0][key] = value def __delitem__(self, key): try: del self.maps[0][key] except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def popitem(self): 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' try: return self.maps[0].popitem() except KeyError: raise KeyError('No keys found in the first mapping.') def pop(self, key, *args): 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' try: return self.maps[0].pop(key, *args) except KeyError: raise KeyError('Key not found in the first mapping: {!r}'.format(key)) def clear(self): 'Clear maps[0], leaving maps[1:] intact.' 
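# --- Usage sketch (added for illustration): lookups fall through the
# chain while writes touch only maps[0], as implemented above.
from collections import ChainMap

defaults = {"color": "red", "user": "guest"}
session = {"user": "ada"}
cm = ChainMap(session, defaults)
print(cm["user"], cm["color"])   # ada red: first hit wins
cm["color"] = "blue"             # lands in session (maps[0])
print(defaults["color"])         # red: deeper maps untouched
print(cm.parents["user"])        # guest: view that skips maps[0]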
self.maps[0].clear() ################################################################################ ### UserDict ################################################################################ class UserDict(MutableMapping): # Start by filling-out the abstract methods def __init__(*args, **kwargs): if not args: raise TypeError("descriptor '__init__' of 'UserDict' object " "needs an argument") self, *args = args if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) if args: dict = args[0] elif 'dict' in kwargs: dict = kwargs.pop('dict') import warnings warnings.warn("Passing 'dict' as keyword argument is deprecated", DeprecationWarning, stacklevel=2) else: dict = None self.data = {} if dict is not None: self.update(dict) if len(kwargs): self.update(kwargs) def __len__(self): return len(self.data) def __getitem__(self, key): if key in self.data: return self.data[key] if hasattr(self.__class__, "__missing__"): return self.__class__.__missing__(self, key) raise KeyError(key) def __setitem__(self, key, item): self.data[key] = item def __delitem__(self, key): del self.data[key] def __iter__(self): return iter(self.data) # Modify __contains__ to work correctly when __missing__ is present def __contains__(self, key): return key in self.data # Now, add the methods in dicts but not in MutableMapping def __repr__(self): return repr(self.data) def copy(self): if self.__class__ is UserDict: return UserDict(self.data.copy()) import copy data = self.data try: self.data = {} c = copy.copy(self) finally: self.data = data c.update(self) return c @classmethod def fromkeys(cls, iterable, value=None): d = cls() for key in iterable: d[key] = value return d ################################################################################ ### UserList ################################################################################ class UserList(MutableSequence): """A more or less complete user-defined wrapper around list objects.""" def __init__(self, initlist=None): self.data = [] if initlist is not None: # XXX should this accept an arbitrary sequence? 
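# --- Usage sketch (added for illustration): UserDict.__getitem__ above
# defers to a class-level __missing__ hook, mirroring dict subclass
# behaviour; the class name is a made-up example.
from collections import UserDict

class DefaultingDict(UserDict):
    def __missing__(self, key):
        return "<absent:%s>" % key

d = DefaultingDict(a=1)
print(d["a"])      # 1
print(d["zzz"])    # <absent:zzz>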
if type(initlist) == type(self.data): self.data[:] = initlist elif isinstance(initlist, UserList): self.data[:] = initlist.data[:] else: self.data = list(initlist) def __repr__(self): return repr(self.data) def __lt__(self, other): return self.data < self.__cast(other) def __le__(self, other): return self.data <= self.__cast(other) def __eq__(self, other): return self.data == self.__cast(other) def __gt__(self, other): return self.data > self.__cast(other) def __ge__(self, other): return self.data >= self.__cast(other) def __cast(self, other): return other.data if isinstance(other, UserList) else other def __contains__(self, item): return item in self.data def __len__(self): return len(self.data) def __getitem__(self, i): return self.data[i] def __setitem__(self, i, item): self.data[i] = item def __delitem__(self, i): del self.data[i] def __add__(self, other): if isinstance(other, UserList): return self.__class__(self.data + other.data) elif isinstance(other, type(self.data)): return self.__class__(self.data + other) return self.__class__(self.data + list(other)) def __radd__(self, other): if isinstance(other, UserList): return self.__class__(other.data + self.data) elif isinstance(other, type(self.data)): return self.__class__(other + self.data) return self.__class__(list(other) + self.data) def __iadd__(self, other): if isinstance(other, UserList): self.data += other.data elif isinstance(other, type(self.data)): self.data += other else: self.data += list(other) return self def __mul__(self, n): return self.__class__(self.data*n) __rmul__ = __mul__ def __imul__(self, n): self.data *= n return self def append(self, item): self.data.append(item) def insert(self, i, item): self.data.insert(i, item) def pop(self, i=-1): return self.data.pop(i) def remove(self, item): self.data.remove(item) def clear(self): self.data.clear() def copy(self): return self.__class__(self) def count(self, item): return self.data.count(item) def index(self, item, *args): return self.data.index(item, *args) def reverse(self): self.data.reverse() def sort(self, *args, **kwds): self.data.sort(*args, **kwds) def extend(self, other): if isinstance(other, UserList): self.data.extend(other.data) else: self.data.extend(other) ################################################################################ ### UserString ################################################################################ class UserString(Sequence): def __init__(self, seq): if isinstance(seq, str): self.data = seq elif isinstance(seq, UserString): self.data = seq.data[:] else: self.data = str(seq) def __str__(self): return str(self.data) def __repr__(self): return repr(self.data) def __int__(self): return int(self.data) def __float__(self): return float(self.data) def __complex__(self): return complex(self.data) def __hash__(self): return hash(self.data) def __getnewargs__(self): return (self.data[:],) def __eq__(self, string): if isinstance(string, UserString): return self.data == string.data return self.data == string def __lt__(self, string): if isinstance(string, UserString): return self.data < string.data return self.data < string def __le__(self, string): if isinstance(string, UserString): return self.data <= string.data return self.data <= string def __gt__(self, string): if isinstance(string, UserString): return self.data > string.data return self.data > string def __ge__(self, string): if isinstance(string, UserString): return self.data >= string.data return self.data >= string def __contains__(self, char): if isinstance(char, 
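# --- Usage sketch (added for illustration): because UserList keeps its
# payload in .data and routes comparisons through __cast(), a subclass
# overrides only what it wants to change.
from collections import UserList

class UniqueList(UserList):
    def append(self, item):
        if item not in self.data:   # consult the wrapped list directly
            self.data.append(item)

ul = UniqueList([1, 2])
ul.append(2)                 # duplicate: ignored
ul.append(3)
print(ul)                    # [1, 2, 3]
print(ul == [1, 2, 3])       # True: __eq__ casts the plain list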
UserString): char = char.data return char in self.data def __len__(self): return len(self.data) def __getitem__(self, index): return self.__class__(self.data[index]) def __add__(self, other): if isinstance(other, UserString): return self.__class__(self.data + other.data) elif isinstance(other, str): return self.__class__(self.data + other) return self.__class__(self.data + str(other)) def __radd__(self, other): if isinstance(other, str): return self.__class__(other + self.data) return self.__class__(str(other) + self.data) def __mul__(self, n): return self.__class__(self.data*n) __rmul__ = __mul__ def __mod__(self, args): return self.__class__(self.data % args) def __rmod__(self, format): return self.__class__(format % args) # the following methods are defined in alphabetical order: def capitalize(self): return self.__class__(self.data.capitalize()) def casefold(self): return self.__class__(self.data.casefold()) def center(self, width, *args): return self.__class__(self.data.center(width, *args)) def count(self, sub, start=0, end=_sys.maxsize): if isinstance(sub, UserString): sub = sub.data return self.data.count(sub, start, end) def encode(self, encoding=None, errors=None): # XXX improve this? if encoding: if errors: return self.__class__(self.data.encode(encoding, errors)) return self.__class__(self.data.encode(encoding)) return self.__class__(self.data.encode()) def endswith(self, suffix, start=0, end=_sys.maxsize): return self.data.endswith(suffix, start, end) def expandtabs(self, tabsize=8): return self.__class__(self.data.expandtabs(tabsize)) def find(self, sub, start=0, end=_sys.maxsize): if isinstance(sub, UserString): sub = sub.data return self.data.find(sub, start, end) def format(self, *args, **kwds): return self.data.format(*args, **kwds) def format_map(self, mapping): return self.data.format_map(mapping) def index(self, sub, start=0, end=_sys.maxsize): return self.data.index(sub, start, end) def isalpha(self): return self.data.isalpha() def isalnum(self): return self.data.isalnum() def isdecimal(self): return self.data.isdecimal() def isdigit(self): return self.data.isdigit() def isidentifier(self): return self.data.isidentifier() def islower(self): return self.data.islower() def isnumeric(self): return self.data.isnumeric() def isprintable(self): return self.data.isprintable() def isspace(self): return self.data.isspace() def istitle(self): return self.data.istitle() def isupper(self): return self.data.isupper() def join(self, seq): return self.data.join(seq) def ljust(self, width, *args): return self.__class__(self.data.ljust(width, *args)) def lower(self): return self.__class__(self.data.lower()) def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars)) maketrans = str.maketrans def partition(self, sep): return self.data.partition(sep) def replace(self, old, new, maxsplit=-1): if isinstance(old, UserString): old = old.data if isinstance(new, UserString): new = new.data return self.__class__(self.data.replace(old, new, maxsplit)) def rfind(self, sub, start=0, end=_sys.maxsize): if isinstance(sub, UserString): sub = sub.data return self.data.rfind(sub, start, end) def rindex(self, sub, start=0, end=_sys.maxsize): return self.data.rindex(sub, start, end) def rjust(self, width, *args): return self.__class__(self.data.rjust(width, *args)) def rpartition(self, sep): return self.data.rpartition(sep) def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars)) def split(self, sep=None, maxsplit=-1): return self.data.split(sep, maxsplit) def 
rsplit(self, sep=None, maxsplit=-1): return self.data.rsplit(sep, maxsplit) def splitlines(self, keepends=False): return self.data.splitlines(keepends) def startswith(self, prefix, start=0, end=_sys.maxsize): return self.data.startswith(prefix, start, end) def strip(self, chars=None): return self.__class__(self.data.strip(chars)) def swapcase(self): return self.__class__(self.data.swapcase()) def title(self): return self.__class__(self.data.title()) def translate(self, *args): return self.__class__(self.data.translate(*args)) def upper(self): return self.__class__(self.data.upper()) def zfill(self, width): return self.__class__(self.data.zfill(width)) usr/lib64/python3.6/multiprocessing/__init__.py000064400000001633151027352140015410 0ustar00# # Package analogous to 'threading.py' but using processes # # multiprocessing/__init__.py # # This package is intended to duplicate the functionality (and much of # the API) of threading.py but uses processes instead of threads. A # subpackage 'multiprocessing.dummy' has the same API but is a simple # wrapper for 'threading'. # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # import sys from . import context # # Copy stuff from default context # globals().update((name, getattr(context._default_context, name)) for name in context._default_context.__all__) __all__ = context._default_context.__all__ # # XXX These should not really be documented or public. # SUBDEBUG = 5 SUBWARNING = 25 # # Alias for main module -- will be reset by bootstrapping child processes # if '__main__' in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] usr/lib/python3.6/site-packages/nftables/__init__.py000064400000000030151027352210016311 0ustar00from .nftables import * usr/lib64/python2.7/ensurepip/__init__.py000064400000016052151027354300014174 0ustar00#! /usr/bin/python2.7 from __future__ import print_function import distutils.version import glob import os import os.path import shutil import sys import tempfile __all__ = ["version", "bootstrap"] _WHEEL_DIR = "/usr/share/python{}-wheels/".format(sys.version_info[0]) def _get_most_recent_wheel_version(pkg): prefix = os.path.join(_WHEEL_DIR, "{}-".format(pkg)) suffix = "-py2.py3-none-any.whl" pattern = "{}*{}".format(prefix, suffix) versions = (p[len(prefix):-len(suffix)] for p in glob.glob(pattern)) return str(max(versions, key=distutils.version.LooseVersion)) _SETUPTOOLS_VERSION = _get_most_recent_wheel_version("setuptools") _PIP_VERSION = _get_most_recent_wheel_version("pip") _PROJECTS = [ ("setuptools", _SETUPTOOLS_VERSION), ("pip", _PIP_VERSION), ] def _run_pip(args, additional_paths=None): # Add our bundled software to the sys.path so we can import it if additional_paths is not None: sys.path = additional_paths + sys.path # Install the bundled software try: # pip 10 from pip._internal import main except ImportError: # pip 9 from pip import main return main(args) def version(): """ Returns a string specifying the bundled version of pip. 
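# --- Isolated sketch (illustrative version strings) of the selection
# idiom used by _get_most_recent_wheel_version() above: LooseVersion
# gives numeric-aware ordering, unlike plain string comparison.
import distutils.version

versions = ["9.0.1", "10.0", "9.0.3"]
print(max(versions, key=distutils.version.LooseVersion))   # 10.0
print(max(versions))                                       # 9.0.3 (lexicographic)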
""" return _PIP_VERSION def _disable_pip_configuration_settings(): # We deliberately ignore all pip environment variables # when invoking pip # See http://bugs.python.org/issue19734 for details keys_to_remove = [k for k in os.environ if k.startswith("PIP_")] for k in keys_to_remove: del os.environ[k] # We also ignore the settings in the default pip configuration file # See http://bugs.python.org/issue20053 for details os.environ['PIP_CONFIG_FILE'] = os.devnull def bootstrap(root=None, upgrade=False, user=False, altinstall=False, default_pip=True, verbosity=0): """ Bootstrap pip into the current Python installation (or the given root directory). Note that calling this function will alter both sys.path and os.environ. """ # Discard the return value _bootstrap(root=root, upgrade=upgrade, user=user, altinstall=altinstall, default_pip=default_pip, verbosity=verbosity) def _bootstrap(root=None, upgrade=False, user=False, altinstall=False, default_pip=True, verbosity=0): """ Bootstrap pip into the current Python installation (or the given root directory). Returns pip command status code. Note that calling this function will alter both sys.path and os.environ. """ if altinstall and default_pip: raise ValueError("Cannot use altinstall and default_pip together") _disable_pip_configuration_settings() # By default, installing pip and setuptools installs all of the # following scripts (X.Y == running Python version): # # pip, pipX, pipX.Y, easy_install, easy_install-X.Y # # pip 1.5+ allows ensurepip to request that some of those be left out if altinstall: # omit pip, pipX and easy_install os.environ["ENSUREPIP_OPTIONS"] = "altinstall" elif not default_pip: # omit pip and easy_install os.environ["ENSUREPIP_OPTIONS"] = "install" tmpdir = tempfile.mkdtemp() try: # Put our bundled wheels into a temporary directory and construct the # additional paths that need added to sys.path additional_paths = [] for project, version in _PROJECTS: wheel_name = "{}-{}-py2.py3-none-any.whl".format(project, version) with open(os.path.join(_WHEEL_DIR, wheel_name), "rb") as sfp: with open(os.path.join(tmpdir, wheel_name), "wb") as fp: fp.write(sfp.read()) additional_paths.append(os.path.join(tmpdir, wheel_name)) # Construct the arguments to be passed to the pip command args = ["install", "--no-index", "--find-links", tmpdir] if root: args += ["--root", root] if upgrade: args += ["--upgrade"] if user: args += ["--user"] if verbosity: args += ["-" + "v" * verbosity] return _run_pip(args + [p[0] for p in _PROJECTS], additional_paths) finally: shutil.rmtree(tmpdir, ignore_errors=True) def _uninstall_helper(verbosity=0): """Helper to support a clean default uninstall process on Windows Note that calling this function may alter os.environ. 
""" # Nothing to do if pip was never installed, or has been removed try: import pip except ImportError: return # If the pip version doesn't match the bundled one, leave it alone if pip.__version__ != _PIP_VERSION: msg = ("ensurepip will only uninstall a matching version " "({!r} installed, {!r} bundled)") print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr) return _disable_pip_configuration_settings() # Construct the arguments to be passed to the pip command args = ["uninstall", "-y", "--disable-pip-version-check"] if verbosity: args += ["-" + "v" * verbosity] return _run_pip(args + [p[0] for p in reversed(_PROJECTS)]) def _main(argv=None): import argparse parser = argparse.ArgumentParser(prog="python -m ensurepip") parser.add_argument( "--version", action="version", version="pip {}".format(version()), help="Show the version of pip that is bundled with this Python.", ) parser.add_argument( "-v", "--verbose", action="count", default=0, dest="verbosity", help=("Give more output. Option is additive, and can be used up to 3 " "times."), ) parser.add_argument( "-U", "--upgrade", action="store_true", default=False, help="Upgrade pip and dependencies, even if already installed.", ) parser.add_argument( "--user", action="store_true", default=False, help="Install using the user scheme.", ) parser.add_argument( "--root", default=None, help="Install everything relative to this alternate root directory.", ) parser.add_argument( "--altinstall", action="store_true", default=False, help=("Make an alternate install, installing only the X.Y versioned " "scripts (Default: pipX, pipX.Y, easy_install-X.Y)."), ) parser.add_argument( "--default-pip", action="store_true", default=True, dest="default_pip", help=argparse.SUPPRESS, ) parser.add_argument( "--no-default-pip", action="store_false", dest="default_pip", help=("Make a non default install, installing only the X and X.Y " "versioned scripts."), ) args = parser.parse_args(argv) return _bootstrap( root=args.root, upgrade=args.upgrade, user=args.user, verbosity=args.verbosity, altinstall=args.altinstall, default_pip=args.default_pip, ) usr/lib64/python3.6/asyncio/__init__.py000064400000002634151027356120013632 0ustar00"""The asyncio package, tracking PEP 3156.""" import sys # The selectors module is in the stdlib in Python 3.4 but not in 3.3. # Do this first, so the other submodules can use "from . import selectors". # Prefer asyncio/selectors.py over the stdlib one, as ours may be newer. try: from . import selectors except ImportError: import selectors # Will also be exported. if sys.platform == 'win32': # Similar thing for _overlapped. try: from . import _overlapped except ImportError: import _overlapped # Will also be exported. # This relies on each of the submodules having an __all__ variable. 
    from .base_events import *
    from .coroutines import *
    from .events import *
    from .futures import *
    from .locks import *
    from .protocols import *
    from .queues import *
    from .streams import *
    from .subprocess import *
    from .tasks import *
    from .transports import *

    __all__ = (base_events.__all__ +
               coroutines.__all__ +
               events.__all__ +
               futures.__all__ +
               locks.__all__ +
               protocols.__all__ +
               queues.__all__ +
               streams.__all__ +
               subprocess.__all__ +
               tasks.__all__ +
               transports.__all__)

    if sys.platform == 'win32':  # pragma: no cover
        from .windows_events import *
        __all__ += windows_events.__all__
    else:
        from .unix_events import *  # pragma: no cover
        __all__ += unix_events.__all__
    usr/lib64/python2.7/email/__init__.py
    # Copyright (C) 2001-2006 Python Software Foundation
    # Author: Barry Warsaw
    # Contact: email-sig@python.org

    """A package for parsing, handling, and generating email messages."""

    __version__ = '4.0.3'

    __all__ = [
        # Old names
        'base64MIME', 'Charset', 'Encoders', 'Errors', 'Generator',
        'Header', 'Iterators', 'Message', 'MIMEAudio', 'MIMEBase',
        'MIMEImage', 'MIMEMessage', 'MIMEMultipart', 'MIMENonMultipart',
        'MIMEText', 'Parser', 'quopriMIME', 'Utils',
        'message_from_string', 'message_from_file',
        # new names
        'base64mime', 'charset', 'encoders', 'errors', 'generator',
        'header', 'iterators', 'message', 'mime', 'parser',
        'quoprimime', 'utils',
        ]


    # Some convenience routines.  Don't import Parser and Message as side-effects
    # of importing email since those cascadingly import most of the rest of the
    # email package.
    def message_from_string(s, *args, **kws):
        """Parse a string into a Message object model.

        Optional _class and strict are passed to the Parser constructor.
        """
        from email.parser import Parser
        return Parser(*args, **kws).parsestr(s)

    def message_from_file(fp, *args, **kws):
        """Read a file and parse its contents into a Message object model.

        Optional _class and strict are passed to the Parser constructor.
        """
        from email.parser import Parser
        return Parser(*args, **kws).parse(fp)


    # Lazy loading to provide name mapping from new-style names (PEP 8 compatible
    # email 4.0 module names), to old-style names (email 3.0 module names).
    import sys

    class LazyImporter(object):
        def __init__(self, module_name):
            self.__name__ = 'email.' + module_name

        def __getattr__(self, name):
            __import__(self.__name__)
            mod = sys.modules[self.__name__]
            self.__dict__.update(mod.__dict__)
            return getattr(mod, name)

    _LOWERNAMES = [
        # email.<old name> -> email.<new name is lowercased old name>
        'Charset', 'Encoders', 'Errors', 'FeedParser', 'Generator', 'Header',
        'Iterators', 'Message', 'Parser', 'Utils', 'base64MIME', 'quopriMIME',
        ]

    _MIMENAMES = [
        # email.MIME<old name> -> email.mime.<new name is lowercased old name>
        'Audio', 'Base', 'Image', 'Message', 'Multipart', 'NonMultipart', 'Text',
        ]

    for _name in _LOWERNAMES:
        importer = LazyImporter(_name.lower())
        sys.modules['email.' + _name] = importer
        setattr(sys.modules['email'], _name, importer)

    import email.mime
    for _name in _MIMENAMES:
        importer = LazyImporter('mime.' + _name.lower())
        sys.modules['email.MIME' + _name] = importer
        setattr(sys.modules['email'], 'MIME' + _name, importer)
        setattr(sys.modules['email.mime'], _name, importer)
    usr/lib64/python2.7/ctypes/macholib/__init__.py
    """
    Enough Mach-O to make your head spin.

    See the relevant header files in /usr/include/mach-o

    And also Apple's documentation.
""" __version__ = '1.0' usr/lib/python3.6/site-packages/html2text/__init__.py000064400000100004151027610720016453 0ustar00"""html2text: Turn HTML into equivalent Markdown-structured text.""" import html.entities import html.parser import re import urllib.parse as urlparse from textwrap import wrap from html2text import config from html2text.utils import ( dumb_css_parser, element_style, escape_md, escape_md_section, google_fixed_width_font, google_has_height, google_list_style, google_text_emphasis, hn, list_numbering_start, pad_tables_in_text, skipwrap, unifiable_n, ) __version__ = (2019, 9, 26) # TODO: # Support decoded entities with UNIFIABLE. class HTML2Text(html.parser.HTMLParser): def __init__(self, out=None, baseurl="", bodywidth=config.BODY_WIDTH): """ Input parameters: out: possible custom replacement for self.outtextf (which appends lines of text). baseurl: base URL of the document we process """ super().__init__(convert_charrefs=False) # Config options self.split_next_td = False self.td_count = 0 self.table_start = False self.unicode_snob = config.UNICODE_SNOB # covered in cli self.escape_snob = config.ESCAPE_SNOB # covered in cli self.links_each_paragraph = config.LINKS_EACH_PARAGRAPH self.body_width = bodywidth # covered in cli self.skip_internal_links = config.SKIP_INTERNAL_LINKS # covered in cli self.inline_links = config.INLINE_LINKS # covered in cli self.protect_links = config.PROTECT_LINKS # covered in cli self.google_list_indent = config.GOOGLE_LIST_INDENT # covered in cli self.ignore_links = config.IGNORE_ANCHORS # covered in cli self.ignore_images = config.IGNORE_IMAGES # covered in cli self.images_as_html = config.IMAGES_AS_HTML # covered in cli self.images_to_alt = config.IMAGES_TO_ALT # covered in cli self.images_with_size = config.IMAGES_WITH_SIZE # covered in cli self.ignore_emphasis = config.IGNORE_EMPHASIS # covered in cli self.bypass_tables = config.BYPASS_TABLES # covered in cli self.ignore_tables = config.IGNORE_TABLES # covered in cli self.google_doc = False # covered in cli self.ul_item_mark = "*" # covered in cli self.emphasis_mark = "_" # covered in cli self.strong_mark = "**" self.single_line_break = config.SINGLE_LINE_BREAK # covered in cli self.use_automatic_links = config.USE_AUTOMATIC_LINKS # covered in cli self.hide_strikethrough = False # covered in cli self.mark_code = config.MARK_CODE self.wrap_list_items = config.WRAP_LIST_ITEMS # covered in cli self.wrap_links = config.WRAP_LINKS # covered in cli self.pad_tables = config.PAD_TABLES # covered in cli self.default_image_alt = config.DEFAULT_IMAGE_ALT # covered in cli self.tag_callback = None self.open_quote = config.OPEN_QUOTE # covered in cli self.close_quote = config.CLOSE_QUOTE # covered in cli if out is None: self.out = self.outtextf else: self.out = out # empty list to store output characters before they are "joined" self.outtextlist = [] self.quiet = 0 self.p_p = 0 # number of newline character to print before next output self.outcount = 0 self.start = True self.space = False self.a = [] self.astack = [] self.maybe_automatic_link = None self.empty_link = False self.absolute_url_matcher = re.compile(r"^[a-zA-Z+]+://") self.acount = 0 self.list = [] self.blockquote = 0 self.pre = False self.startpre = False self.code = False self.quote = False self.br_toggle = "" self.lastWasNL = False self.lastWasList = False self.style = 0 self.style_def = {} self.tag_stack = [] self.emphasis = 0 self.drop_white_space = 0 self.inheader = False self.abbr_title = None # current abbreviation definition 
            self.abbr_data = None  # last inner HTML (for abbr being defined)
            self.abbr_list = {}  # stack of abbreviations to write later
            self.baseurl = baseurl
            self.stressed = False
            self.preceding_stressed = False
            self.preceding_data = None
            self.current_tag = None

            config.UNIFIABLE["nbsp"] = "&nbsp_place_holder;"

        def feed(self, data):
            data = data.replace("</' + 'script>", "</ignore>")
            super().feed(data)

        def handle(self, data):
            self.feed(data)
            self.feed("")
            markdown = self.optwrap(self.close())
            if self.pad_tables:
                return pad_tables_in_text(markdown)
            else:
                return markdown

        def outtextf(self, s):
            self.outtextlist.append(s)
            if s:
                self.lastWasNL = s[-1] == "\n"

        def close(self):
            super().close()

            self.pbr()
            self.o("", force="end")

            outtext = "".join(self.outtextlist)

            if self.unicode_snob:
                nbsp = html.entities.html5["nbsp;"]
            else:
                nbsp = " "
            outtext = outtext.replace("&nbsp_place_holder;", nbsp)

            # Clear self.outtextlist to avoid memory leak of its content to
            # the next handling.
            self.outtextlist = []

            return outtext

        def handle_charref(self, c):
            self.handle_data(self.charref(c), True)

        def handle_entityref(self, c):
            ref = self.entityref(c)

            # ref may be an empty string (e.g. for &lrm;/&rlm; markers that should
            # not contribute to the final output).
            # self.handle_data cannot handle a zero-length string right after a
            # stressed tag or mid-text within a stressed tag (text get split and
            # self.stressed/self.preceding_stressed gets switched after the first
            # part of that text).
            if ref:
                self.handle_data(ref, True)

        def handle_starttag(self, tag, attrs):
            self.handle_tag(tag, attrs, start=True)

        def handle_endtag(self, tag):
            self.handle_tag(tag, None, start=False)

        def previousIndex(self, attrs):
            """
            :type attrs: dict

            :returns: The index of certain set of attributes (of a link) in the
                self.a list. If the set of attributes is not found, returns None
            :rtype: int
            """
            if "href" not in attrs:
                return None

            match = False
            for i, a in enumerate(self.a):
                if "href" in a and a["href"] == attrs["href"]:
                    if "title" in a or "title" in attrs:
                        if (
                            "title" in a
                            and "title" in attrs
                            and a["title"] == attrs["title"]
                        ):
                            match = True
                    else:
                        match = True

                if match:
                    return i

            return None

        def handle_emphasis(self, start, tag_style, parent_style):
            """
            Handles various text emphases
            """
            tag_emphasis = google_text_emphasis(tag_style)
            parent_emphasis = google_text_emphasis(parent_style)

            # handle Google's text emphasis
            strikethrough = "line-through" in tag_emphasis and self.hide_strikethrough

            # google and others may mark a font's weight as `bold` or `700`
            bold = False
            for bold_marker in config.BOLD_TEXT_STYLE_VALUES:
                bold = bold_marker in tag_emphasis and bold_marker not in parent_emphasis
                if bold:
                    break

            italic = "italic" in tag_emphasis and "italic" not in parent_emphasis
            fixed = (
                google_fixed_width_font(tag_style)
                and not google_fixed_width_font(parent_style)
                and not self.pre
            )

            if start:
                # crossed-out text must be handled before other attributes
                # in order not to output qualifiers unnecessarily
                if bold or italic or fixed:
                    self.emphasis += 1
                if strikethrough:
                    self.quiet += 1
                if italic:
                    self.o(self.emphasis_mark)
                    self.drop_white_space += 1
                if bold:
                    self.o(self.strong_mark)
                    self.drop_white_space += 1
                if fixed:
                    self.o("`")
                    self.drop_white_space += 1
                    self.code = True
            else:
                if bold or italic or fixed:
                    # there must not be whitespace before closing emphasis mark
                    self.emphasis -= 1
                    self.space = False
                if fixed:
                    if self.drop_white_space:
                        # empty emphasis, drop it
                        self.drop_white_space -= 1
                    else:
                        self.o("`")
                    self.code = False
                if bold:
                    if self.drop_white_space:
                        # empty emphasis, drop it
                        self.drop_white_space -= 1
                    else:
                        self.o(self.strong_mark)
                if italic:
                    if self.drop_white_space:
                        # empty emphasis, drop it
                        self.drop_white_space -= 1
                    else:
                        self.o(self.emphasis_mark)

                # space is only allowed after *all* emphasis marks
                if (bold or italic) and not self.emphasis:
                    self.o(" ")
                if strikethrough:
                    self.quiet -= 1

        def handle_tag(self, tag, attrs, start):
            self.current_tag = tag
            # attrs is None for endtags
            if attrs is None:
                attrs = {}
            else:
                attrs = dict(attrs)

            if self.tag_callback is not None:
                if self.tag_callback(self, tag, attrs, start) is True:
                    return

            # first thing inside the anchor tag is another tag
            # that produces some output
            if (
                start
                and self.maybe_automatic_link is not None
                and tag not in ["p", "div", "style", "dl", "dt"]
                and (tag != "img" or self.ignore_images)
            ):
                self.o("[")
                self.maybe_automatic_link = None
                self.empty_link = False

            if self.google_doc:
                # the attrs parameter is empty for a closing tag. in addition, we
                # need the attributes of the parent nodes in order to get a
                # complete style description for the current element. we assume
                # that google docs export well formed html.
                parent_style = {}
                if start:
                    if self.tag_stack:
                        parent_style = self.tag_stack[-1][2]
                    tag_style = element_style(attrs, self.style_def, parent_style)
                    self.tag_stack.append((tag, attrs, tag_style))
                else:
                    dummy, attrs, tag_style = (
                        self.tag_stack.pop() if self.tag_stack else (None, {}, {})
                    )
                    if self.tag_stack:
                        parent_style = self.tag_stack[-1][2]

            if hn(tag):
                self.p()
                if start:
                    self.inheader = True
                    self.o(hn(tag) * "#" + " ")
                else:
                    self.inheader = False
                    return  # prevent redundant emphasis marks on headers

            if tag in ["p", "div"]:
                if self.google_doc:
                    if start and google_has_height(tag_style):
                        self.p()
                    else:
                        self.soft_br()
                elif self.astack and tag == "div":
                    pass
                else:
                    self.p()

            if tag == "br" and start:
                if self.blockquote > 0:
                    self.o("  \n> ")
                else:
                    self.o("  \n")

            if tag == "hr" and start:
                self.p()
                self.o("* * *")
                self.p()

            if tag in ["head", "style", "script"]:
                if start:
                    self.quiet += 1
                else:
                    self.quiet -= 1

            if tag == "style":
                if start:
                    self.style += 1
                else:
                    self.style -= 1

            if tag in ["body"]:
                self.quiet = 0  # sites like 9rules.com never close <head>

            if tag == "blockquote":
                if start:
                    self.p()
                    self.o("> ", force=True)
                    self.start = True
                    self.blockquote += 1
                else:
                    self.blockquote -= 1
                    self.p()

            def no_preceding_space(self):
                return self.preceding_data and re.match(r"[^\s]", self.preceding_data[-1])

            if tag in ["em", "i", "u"] and not self.ignore_emphasis:
                if start and no_preceding_space(self):
                    emphasis = " " + self.emphasis_mark
                else:
                    emphasis = self.emphasis_mark

                self.o(emphasis)
                if start:
                    self.stressed = True

            if tag in ["strong", "b"] and not self.ignore_emphasis:
                if start and no_preceding_space(self):
                    strong = " " + self.strong_mark
                else:
                    strong = self.strong_mark

                self.o(strong)
                if start:
                    self.stressed = True

            if tag in ["del", "strike", "s"]:
                if start and no_preceding_space(self):
                    strike = " ~~"
                else:
                    strike = "~~"

                self.o(strike)
                if start:
                    self.stressed = True

            if self.google_doc:
                if not self.inheader:
                    # handle some font attributes, but leave headers clean
                    self.handle_emphasis(start, tag_style, parent_style)

            if tag in ["kbd", "code", "tt"] and not self.pre:
                self.o("`")  # TODO: `` `this` ``
                self.code = not self.code

            if tag == "abbr":
                if start:
                    self.abbr_title = None
                    self.abbr_data = ""
                    if "title" in attrs:
                        self.abbr_title = attrs["title"]
                else:
                    if self.abbr_title is not None:
                        self.abbr_list[self.abbr_data] = self.abbr_title
                        self.abbr_title = None
                    self.abbr_data = None

            if tag == "q":
                if not self.quote:
                    self.o(self.open_quote)
                else:
                    self.o(self.close_quote)
                self.quote = not self.quote

            def link_url(self, link, title=""):
                url = urlparse.urljoin(self.baseurl, link)
                title = ' "{}"'.format(title) if title.strip() else ""
                self.o("]({url}{title})".format(url=escape_md(url), title=title))

            if tag == "a" and not self.ignore_links:
                if start:
                    if (
                        "href" in attrs
                        and attrs["href"] is not None
                        and not (self.skip_internal_links and attrs["href"].startswith("#"))
                    ):
                        self.astack.append(attrs)
                        self.maybe_automatic_link = attrs["href"]
                        self.empty_link = True
                        if self.protect_links:
                            attrs["href"] = "<" + attrs["href"] + ">"
                    else:
                        self.astack.append(None)
                else:
                    if self.astack:
                        a = self.astack.pop()
                        if self.maybe_automatic_link and not self.empty_link:
                            self.maybe_automatic_link = None
                        elif a:
                            if self.empty_link:
                                self.o("[")
                                self.empty_link = False
                                self.maybe_automatic_link = None
                            if self.inline_links:
                                try:
                                    title = a["title"] if a["title"] else ""
                                    title = escape_md(title)
                                except KeyError:
                                    link_url(self, a["href"], "")
                                else:
                                    link_url(self, a["href"], title)
                            else:
                                i = self.previousIndex(a)
                                if i is not None:
                                    a = self.a[i]
                                else:
                                    self.acount += 1
                                    a["count"] = self.acount
                                    a["outcount"] = self.outcount
                                    self.a.append(a)
                                self.o("][" + str(a["count"]) + "]")

            if tag == "img" and start and not self.ignore_images:
                if "src" in attrs:
                    if not self.images_to_alt:
                        attrs["href"] = attrs["src"]
                    alt = attrs.get("alt") or self.default_image_alt

                    # If we have images_with_size, write raw html including width,
                    # height, and alt attributes
                    if self.images_as_html or (
                        self.images_with_size and ("width" in attrs or "height" in attrs)
                    ):
                        self.o("<img src='" + attrs["src"] + "' ")
                        if "width" in attrs:
                            self.o("width='" + attrs["width"] + "' ")
                        if "height" in attrs:
                            self.o("height='" + attrs["height"] + "' ")
                        if alt:
                            self.o("alt='" + alt + "' ")
                        self.o("/>")
                        return

                    # If we have a link to create, output the start
                    if self.maybe_automatic_link is not None:
                        href = self.maybe_automatic_link
                        if (
                            self.images_to_alt
                            and escape_md(alt) == href
                            and self.absolute_url_matcher.match(href)
                        ):
                            self.o("<" + escape_md(alt) + ">")
                            self.empty_link = False
                            return
                        else:
                            self.o("[")
                            self.maybe_automatic_link = None
                            self.empty_link = False

                    # If we have images_to_alt, we discard the image itself,
                    # considering only the alt text.
                    if self.images_to_alt:
                        self.o(escape_md(alt))
                    else:
                        self.o("![" + escape_md(alt) + "]")
                        if self.inline_links:
                            href = attrs.get("href") or ""
                            self.o(
                                "(" + escape_md(urlparse.urljoin(self.baseurl, href)) + ")"
                            )
                        else:
                            i = self.previousIndex(attrs)
                            if i is not None:
                                attrs = self.a[i]
                            else:
                                self.acount += 1
                                attrs["count"] = self.acount
                                attrs["outcount"] = self.outcount
                                self.a.append(attrs)
                            self.o("[" + str(attrs["count"]) + "]")

            if tag == "dl" and start:
                self.p()
            if tag == "dt" and not start:
                self.pbr()
            if tag == "dd" and start:
                self.o("    ")
            if tag == "dd" and not start:
                self.pbr()

            if tag in ["ol", "ul"]:
                # Google Docs create sub lists as top level lists
                if not self.list and not self.lastWasList:
                    self.p()
                if start:
                    if self.google_doc:
                        list_style = google_list_style(tag_style)
                    else:
                        list_style = tag
                    numbering_start = list_numbering_start(attrs)
                    self.list.append({"name": list_style, "num": numbering_start})
                else:
                    if self.list:
                        self.list.pop()
                        if not self.google_doc and not self.list:
                            self.o("\n")
                self.lastWasList = True
            else:
                self.lastWasList = False

            if tag == "li":
                self.pbr()
                if start:
                    if self.list:
                        li = self.list[-1]
                    else:
                        li = {"name": "ul", "num": 0}
                    if self.google_doc:
                        nest_count = self.google_nest_count(tag_style)
                    else:
                        nest_count = len(self.list)
                    # TODO: line up <ol><li>s > 9 correctly.
                    self.o("  " * nest_count)
                    if li["name"] == "ul":
                        self.o(self.ul_item_mark + " ")
                    elif li["name"] == "ol":
                        li["num"] += 1
                        self.o(str(li["num"]) + ". ")
                    self.start = True

            if tag in ["table", "tr", "td", "th"]:
                if self.ignore_tables:
                    if tag == "tr":
                        if start:
                            pass
                        else:
                            self.soft_br()
                    else:
                        pass

                elif self.bypass_tables:
                    if start:
                        self.soft_br()
                    if tag in ["td", "th"]:
                        if start:
                            self.o("<{}>\n\n".format(tag))
                        else:
                            self.o("\n</{}>".format(tag))
                    else:
                        if start:
                            self.o("<{}>".format(tag))
                        else:
                            self.o("</{}>".format(tag))

                else:
                    if tag == "table":
                        if start:
                            self.table_start = True
                            if self.pad_tables:
                                self.o("<" + config.TABLE_MARKER_FOR_PAD + ">")
                                self.o("  \n")
                        else:
                            if self.pad_tables:
                                self.o("</" + config.TABLE_MARKER_FOR_PAD + ">")
                                self.o("  \n")
                    if tag in ["td", "th"] and start:
                        if self.split_next_td:
                            self.o("| ")
                        self.split_next_td = True

                    if tag == "tr" and start:
                        self.td_count = 0
                    if tag == "tr" and not start:
                        self.split_next_td = False
                        self.soft_br()
                    if tag == "tr" and not start and self.table_start:
                        # Underline table header
                        self.o("|".join(["---"] * self.td_count))
                        self.soft_br()
                        self.table_start = False
                    if tag in ["td", "th"] and start:
                        self.td_count += 1

            if tag == "pre":
                if start:
                    self.startpre = True
                    self.pre = True
                else:
                    self.pre = False
                    if self.mark_code:
                        self.out("\n[/code]")
                self.p()

        # TODO: Add docstring for these one letter functions
        def pbr(self):
            "Pretty print has a line break"
            if self.p_p == 0:
                self.p_p = 1

        def p(self):
            "Set pretty print to 1 or 2 lines"
            self.p_p = 1 if self.single_line_break else 2

        def soft_br(self):
            "Soft breaks"
            self.pbr()
            self.br_toggle = "  "

        def o(self, data, puredata=False, force=False):
            """
            Deal with indentation and whitespace
            """
            if self.abbr_data is not None:
                self.abbr_data += data

            if not self.quiet:
                if self.google_doc:
                    # prevent white space immediately after 'begin emphasis'
                    # marks ('**' and '_')
                    lstripped_data = data.lstrip()
                    if self.drop_white_space and not (self.pre or self.code):
                        data = lstripped_data
                    if lstripped_data != "":
                        self.drop_white_space = 0

                if puredata and not self.pre:
                    # This is a very dangerous call ... it could mess up
                    # all handling of &nbsp; when not handled properly
                    # (see entityref)
                    data = re.sub(r"\s+", r" ", data)
                    if data and data[0] == " ":
                        self.space = True
                        data = data[1:]

                if not data and not force:
                    return

                if self.startpre:
                    # self.out(" :") #TODO: not output when already one there
                    if not data.startswith("\n") and not data.startswith("\r\n"):
                        # <pre>stuff...
                        data = "\n" + data
                    if self.mark_code:
                        self.out("\n[code]")
                        self.p_p = 0
    
                bq = ">" * self.blockquote
                if not (force and data and data[0] == ">") and self.blockquote:
                    bq += " "
    
                if self.pre:
                    if not self.list:
                        bq += "    "
                    # else: list content is already partially indented
                    for i in range(len(self.list)):
                        bq += "    "
                    data = data.replace("\n", "\n" + bq)
    
                if self.startpre:
                    self.startpre = False
                    if self.list:
                        # use existing initial indentation
                        data = data.lstrip("\n")
    
                if self.start:
                    self.space = False
                    self.p_p = 0
                    self.start = False
    
                if force == "end":
                    # It's the end.
                    self.p_p = 0
                    self.out("\n")
                    self.space = False
    
                if self.p_p:
                    self.out((self.br_toggle + "\n" + bq) * self.p_p)
                    self.space = False
                    self.br_toggle = ""
    
                if self.space:
                    if not self.lastWasNL:
                        self.out(" ")
                    self.space = False
    
                if self.a and (
                    (self.p_p == 2 and self.links_each_paragraph) or force == "end"
                ):
                    if force == "end":
                        self.out("\n")
    
                    newa = []
                    for link in self.a:
                        if self.outcount > link["outcount"]:
                            self.out(
                                "   ["
                                + str(link["count"])
                                + "]: "
                                + urlparse.urljoin(self.baseurl, link["href"])
                            )
                            if "title" in link:
                                self.out(" (" + link["title"] + ")")
                            self.out("\n")
                        else:
                            newa.append(link)
    
                    # Don't need an extra line when nothing was done.
                    if self.a != newa:
                        self.out("\n")
    
                    self.a = newa
    
                if self.abbr_list and force == "end":
                    for abbr, definition in self.abbr_list.items():
                        self.out("  *[" + abbr + "]: " + definition + "\n")
    
                self.p_p = 0
                self.out(data)
                self.outcount += 1
    
        def handle_data(self, data, entity_char=False):
            if not data:
                # Data may be empty for some HTML entities. For example,
                # LEFT-TO-RIGHT MARK.
                return
    
            if self.stressed:
                data = data.strip()
                self.stressed = False
                self.preceding_stressed = True
            elif self.preceding_stressed:
                if (
                    re.match(r"[^\s.!?]", data[0])
                    and not hn(self.current_tag)
                    and self.current_tag not in ["a", "code", "pre"]
                ):
                    # should match a letter or common punctuation
                    data = " " + data
                self.preceding_stressed = False
    
            if self.style:
                self.style_def.update(dumb_css_parser(data))
    
            if self.maybe_automatic_link is not None:
                href = self.maybe_automatic_link
                if (
                    href == data
                    and self.absolute_url_matcher.match(href)
                    and self.use_automatic_links
                ):
                    self.o("<" + data + ">")
                    self.empty_link = False
                    return
                else:
                    self.o("[")
                    self.maybe_automatic_link = None
                    self.empty_link = False
    
            if not self.code and not self.pre and not entity_char:
                data = escape_md_section(data, snob=self.escape_snob)
            self.preceding_data = data
            self.o(data, puredata=True)
    
        def charref(self, name):
            if name[0] in ["x", "X"]:
                c = int(name[1:], 16)
            else:
                c = int(name)
    
            if not self.unicode_snob and c in unifiable_n:
                return unifiable_n[c]
            else:
                try:
                    return chr(c)
                except ValueError:  # invalid unicode
                    return ""
    
        def entityref(self, c):
            if not self.unicode_snob and c in config.UNIFIABLE:
                return config.UNIFIABLE[c]
            try:
                ch = html.entities.html5[c + ";"]
            except KeyError:
                return "&" + c + ";"
            return config.UNIFIABLE[c] if c == "nbsp" else ch
    
        def google_nest_count(self, style):
            """
            Calculate the nesting count of google doc lists
    
            :type style: dict
    
            :rtype: int
            """
            nest_count = 0
            if "margin-left" in style:
                nest_count = int(style["margin-left"][:-2]) // self.google_list_indent
    
            return nest_count
    
        def optwrap(self, text):
            """
            Wrap all paragraphs in the provided text.
    
            :type text: str
    
            :rtype: str
            """
            if not self.body_width:
                return text
    
            result = ""
            newlines = 0
            # I cannot think of a better solution for now.
            # To avoid the non-wrap behaviour for entire paras
            # because of the presence of a link in it
            if not self.wrap_links:
                self.inline_links = False
            for para in text.split("\n"):
                if len(para) > 0:
                    if not skipwrap(para, self.wrap_links, self.wrap_list_items):
                        indent = ""
                        if para.startswith("  " + self.ul_item_mark):
                            # list item continuation: add a double indent to the
                            # new lines
                            indent = "    "
                        elif para.startswith("> "):
                            # blockquote continuation: add the greater than symbol
                            # to the new lines
                            indent = "> "
                        wrapped = wrap(
                            para,
                            self.body_width,
                            break_long_words=False,
                            subsequent_indent=indent,
                        )
                        result += "\n".join(wrapped)
                        if para.endswith("  "):
                            result += "  \n"
                            newlines = 1
                        elif indent:
                            result += "\n"
                            newlines = 1
                        else:
                            result += "\n\n"
                            newlines = 2
                    else:
                        # Warning for the tempted!!!
                        # Be aware that obvious replacement of this with
                        # line.isspace()
                        # DOES NOT work! Explanations are welcome.
                        if not config.RE_SPACE.match(para):
                            result += para + "\n"
                            newlines = 1
                else:
                    if newlines < 2:
                        result += "\n"
                        newlines += 1
            return result
    
    
    def html2text(html, baseurl="", bodywidth=None):
        if bodywidth is None:
            bodywidth = config.BODY_WIDTH
        h = HTML2Text(baseurl=baseurl, bodywidth=bodywidth)
    
        return h.handle(html)
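
    # A quick usage sketch for the module above (illustrative only, not part
    # of the original source; the sample markup and base URL are made up).
    # html2text() covers one-shot conversion; HTML2Text is the reusable,
    # configurable converter.
    if __name__ == "__main__":
        sample = (
            "<h1>Title</h1>"
            "<p>Some <b>bold</b> text and a <a href='page.html'>link</a>.</p>"
        )

        # One-shot conversion with default settings.
        print(html2text(sample))

        # Reusable converter: resolve relative links against a base URL and
        # disable wrapping (a body_width of 0 makes optwrap() a no-op).
        h = HTML2Text(baseurl="http://example.com/")
        h.body_width = 0
        print(h.handle(sample))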
    usr/lib/python3.6/site-packages/packaging/__init__.py
    # This file is dual licensed under the terms of the Apache License, Version
    # 2.0, and the BSD License. See the LICENSE file in the root of this repository
    # for complete details.
    from __future__ import absolute_import, division, print_function
    
    from .__about__ import (
        __author__, __copyright__, __email__, __license__, __summary__, __title__,
        __uri__, __version__
    )
    
    __all__ = [
        "__title__", "__summary__", "__uri__", "__version__", "__author__",
        "__email__", "__license__", "__copyright__",
    ]
    usr/lib64/python3.11/xml/etree/__init__.py
    # $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $
    # elementtree package
    
    # --------------------------------------------------------------------
    # The ElementTree toolkit is
    #
    # Copyright (c) 1999-2008 by Fredrik Lundh
    #
    # By obtaining, using, and/or copying this software and/or its
    # associated documentation, you agree that you have read, understood,
    # and will comply with the following terms and conditions:
    #
    # Permission to use, copy, modify, and distribute this software and
    # its associated documentation for any purpose and without fee is
    # hereby granted, provided that the above copyright notice appears in
    # all copies, and that both that copyright notice and this permission
    # notice appear in supporting documentation, and that the name of
    # Secret Labs AB or the author not be used in advertising or publicity
    # pertaining to distribution of the software without specific, written
    # prior permission.
    #
    # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
    # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
    # ABILITY AND FITNESS.  IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
    # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
    # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
    # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
    # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
    # OF THIS SOFTWARE.
    # --------------------------------------------------------------------
    
    # Licensed to PSF under a Contributor Agreement.
    # See https://www.python.org/psf/license for licensing details.
    usr/lib64/python3.6/email/__init__.py
    # Copyright (C) 2001-2007 Python Software Foundation
    # Author: Barry Warsaw
    # Contact: email-sig@python.org
    
    """A package for parsing, handling, and generating email messages."""
    
    __all__ = [
        'base64mime',
        'charset',
        'encoders',
        'errors',
        'feedparser',
        'generator',
        'header',
        'iterators',
        'message',
        'message_from_file',
        'message_from_binary_file',
        'message_from_string',
        'message_from_bytes',
        'mime',
        'parser',
        'quoprimime',
        'utils',
        ]
    
    
    
    # Some convenience routines.  Don't import Parser and Message as side-effects
    # of importing email since those cascadingly import most of the rest of the
    # email package.
    def message_from_string(s, *args, **kws):
        """Parse a string into a Message object model.
    
        Optional _class and strict are passed to the Parser constructor.
        """
        from email.parser import Parser
        return Parser(*args, **kws).parsestr(s)
    
    def message_from_bytes(s, *args, **kws):
        """Parse a bytes string into a Message object model.
    
        Optional _class and strict are passed to the Parser constructor.
        """
        from email.parser import BytesParser
        return BytesParser(*args, **kws).parsebytes(s)
    
    def message_from_file(fp, *args, **kws):
        """Read a file and parse its contents into a Message object model.
    
        Optional _class and strict are passed to the Parser constructor.
        """
        from email.parser import Parser
        return Parser(*args, **kws).parse(fp)
    
    def message_from_binary_file(fp, *args, **kws):
        """Read a binary file and parse its contents into a Message object model.
    
        Optional _class and strict are passed to the Parser constructor.
        """
        from email.parser import BytesParser
        return BytesParser(*args, **kws).parse(fp)
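
    # A small sketch of the convenience routines above (illustrative only; the
    # message text is made up):
    if __name__ == "__main__":
        raw = "From: alice@example.com\nSubject: hello\n\nSome body text.\n"
        msg = message_from_string(raw)
        print(msg["Subject"])      # -> hello
        print(msg.get_payload())   # -> Some body text.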
    usr/lib/python3.6/site-packages/setuptools/extern/__init__.py
    import sys
    
    
    class VendorImporter:
        """
        A PEP 302 meta path importer for finding optionally-vendored
        or otherwise naturally-installed packages from root_name.
        """
    
        def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
            self.root_name = root_name
            self.vendored_names = set(vendored_names)
            self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
    
        @property
        def search_path(self):
            """
            Search first the vendor package then as a natural package.
            """
            yield self.vendor_pkg + '.'
            yield ''
    
        def find_module(self, fullname, path=None):
            """
            Return self when fullname starts with root_name and the
            target module is one vendored through this importer.
            """
            root, base, target = fullname.partition(self.root_name + '.')
            if root:
                return
            if not any(map(target.startswith, self.vendored_names)):
                return
            return self
    
        def load_module(self, fullname):
            """
            Iterate over the search path to locate and load fullname.
            """
            root, base, target = fullname.partition(self.root_name + '.')
            for prefix in self.search_path:
                try:
                    extant = prefix + target
                    __import__(extant)
                    mod = sys.modules[extant]
                    sys.modules[fullname] = mod
                    # mysterious hack:
                    # Remove the reference to the extant package/module
                    # on later Python versions to cause relative imports
                    # in the vendor package to resolve the same modules
                    # as those going through this importer.
                    if sys.version_info > (3, 3):
                        del sys.modules[extant]
                    return mod
                except ImportError:
                    pass
            else:
                raise ImportError(
                    "The '{target}' package is required; "
                    "normally this is bundled with this package so if you get "
                    "this warning, consult the packager of your "
                    "distribution.".format(**locals())
                )
    
        def install(self):
            """
            Install this importer into sys.meta_path if not already present.
            """
            if self not in sys.meta_path:
                sys.meta_path.append(self)
    
    
    names = 'six', 'packaging', 'pyparsing',
    VendorImporter(__name__, names, 'setuptools._vendor').install()
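
    # Illustration of what the installed importer does (a sketch, not part of
    # the original module): with VendorImporter on sys.meta_path, importing
    # one of the named packages under setuptools.extern is first resolved
    # against the vendored copies in setuptools._vendor, then against the
    # normally-installed packages.
    #
    #     from setuptools.extern import packaging
    #     # -> setuptools._vendor.packaging if bundled, else the top-level
    #     #    'packaging' distribution; ImportError only if neither exists.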
    usr/lib/python3.6/site-packages/procfs/__init__.py
    #! /usr/bin/python3
    # -*- python -*-
    # -*- coding: utf-8 -*-
    # SPDX-License-Identifier: GPL-2.0-only
    #
    # Copyright (C) 2008, 2009  Red Hat, Inc.
    #
    """
    Copyright (c) 2008, 2009  Red Hat Inc.
    
    Abstractions to extract information from the Linux kernel /proc files.
    """
    __author__ = "Arnaldo Carvalho de Melo "
    __license__ = "GPLv2 License"
    
    from .procfs import *
    from .utilist import *
    usr/lib/python3.6/site-packages/setuptools/_vendor/__init__.py
    usr/lib/python3.6/site-packages/slip/util/__init__.py
    # -*- coding: utf-8 -*-
    
    from __future__ import absolute_import
    
    from . import hookable
    from . import files
    usr/lib/python3.6/site-packages/dnf/__init__.py
    # __init__.py
    # The toplevel DNF package.
    #
    # Copyright (C) 2012-2016 Red Hat, Inc.
    #
    # This copyrighted material is made available to anyone wishing to use,
    # modify, copy, or redistribute it subject to the terms and conditions of
    # the GNU General Public License v.2, or (at your option) any later version.
    # This program is distributed in the hope that it will be useful, but WITHOUT
    # ANY WARRANTY expressed or implied, including the implied warranties of
    # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
    # Public License for more details.  You should have received a copy of the
    # GNU General Public License along with this program; if not, write to the
    # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
    # 02110-1301, USA.  Any Red Hat trademarks that are incorporated in the
    # source code or documentation are not subject to the GNU General Public
    # License and may only be used or replicated with the express permission of
    # Red Hat, Inc.
    #
    
    from __future__ import unicode_literals
    import warnings
    import dnf.pycomp
    
    warnings.filterwarnings('once', category=DeprecationWarning, module=r'^dnf\..*$')
    
    from dnf.const import VERSION
    __version__ = VERSION  # :api
    
    import dnf.base
    Base = dnf.base.Base # :api
    
    import dnf.plugin
    Plugin = dnf.plugin.Plugin # :api
    
    # setup libraries
    dnf.pycomp.urlparse.uses_fragment.append("media")
    usr/lib64/python3.6/concurrent/futures/__init__.py
    # Copyright 2009 Brian Quinlan. All Rights Reserved.
    # Licensed to PSF under a Contributor Agreement.
    
    """Execute computations asynchronously using threads or processes."""
    
    __author__ = 'Brian Quinlan (brian@sweetapp.com)'
    
    from concurrent.futures._base import (FIRST_COMPLETED,
                                          FIRST_EXCEPTION,
                                          ALL_COMPLETED,
                                          CancelledError,
                                          TimeoutError,
                                          Future,
                                          Executor,
                                          wait,
                                          as_completed)
    from concurrent.futures.process import ProcessPoolExecutor
    from concurrent.futures.thread import ThreadPoolExecutor
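
    # A short usage sketch for the executors re-exported above (illustrative,
    # not part of the original module):
    if __name__ == "__main__":
        def square(n):
            return n * n

        with ThreadPoolExecutor(max_workers=2) as pool:
            # submit() returns a Future; result() blocks until it is done.
            print(pool.submit(square, 7).result())  # -> 49

            # as_completed() yields futures in completion order.
            futures = [pool.submit(square, n) for n in range(5)]
            for f in as_completed(futures):
                print(f.result())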
    usr/lib/python2.7/site-packages/setuptools/__init__.py
    """Extensions to the 'distutils' for large or complex distributions"""
    
    import os
    import functools
    import distutils.core
    import distutils.filelist
    from distutils.util import convert_path
    from fnmatch import fnmatchcase
    
    from setuptools.extern.six.moves import filter, map
    
    import setuptools.version
    from setuptools.extension import Extension
    from setuptools.dist import Distribution, Feature
    from setuptools.depends import Require
    from . import monkey
    
    __all__ = [
        'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
        'find_packages',
    ]
    
    __version__ = setuptools.version.__version__
    
    bootstrap_install_from = None
    
    # If we run 2to3 on .py files, should we also convert docstrings?
    # Default: yes; assume that we can detect doctests reliably
    run_2to3_on_doctests = True
    # Standard package names for fixer packages
    lib2to3_fixer_packages = ['lib2to3.fixes']
    
    
    class PackageFinder(object):
        """
        Generate a list of all Python packages found within a directory
        """
    
        @classmethod
        def find(cls, where='.', exclude=(), include=('*',)):
            """Return a list all Python packages found within directory 'where'
    
            'where' is the root directory which will be searched for packages.  It
            should be supplied as a "cross-platform" (i.e. URL-style) path; it will
            be converted to the appropriate local path syntax.
    
            'exclude' is a sequence of package names to exclude; '*' can be used
            as a wildcard in the names, such that 'foo.*' will exclude all
            subpackages of 'foo' (but not 'foo' itself).
    
            'include' is a sequence of package names to include.  If it's
            specified, only the named packages will be included.  If it's not
            specified, all found packages will be included.  'include' can contain
            shell style wildcard patterns just like 'exclude'.
            """
    
            return list(cls._find_packages_iter(
                convert_path(where),
                cls._build_filter('ez_setup', '*__pycache__', *exclude),
                cls._build_filter(*include)))
    
        @classmethod
        def _find_packages_iter(cls, where, exclude, include):
            """
            All the packages found in 'where' that pass the 'include' filter, but
            not the 'exclude' filter.
            """
            for root, dirs, files in os.walk(where, followlinks=True):
                # Copy dirs to iterate over it, then empty dirs.
                all_dirs = dirs[:]
                dirs[:] = []
    
                for dir in all_dirs:
                    full_path = os.path.join(root, dir)
                    rel_path = os.path.relpath(full_path, where)
                    package = rel_path.replace(os.path.sep, '.')
    
                    # Skip directory trees that are not valid packages
                    if ('.' in dir or not cls._looks_like_package(full_path)):
                        continue
    
                    # Should this package be included?
                    if include(package) and not exclude(package):
                        yield package
    
                    # Keep searching subdirectories, as there may be more packages
                    # down there, even if the parent was excluded.
                    dirs.append(dir)
    
        @staticmethod
        def _looks_like_package(path):
            """Does a directory look like a package?"""
            return os.path.isfile(os.path.join(path, '__init__.py'))
    
        @staticmethod
        def _build_filter(*patterns):
            """
            Given a list of patterns, return a callable that will be true only if
            the input matches at least one of the patterns.
            """
            return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
    
    
    class PEP420PackageFinder(PackageFinder):
        @staticmethod
        def _looks_like_package(path):
            return True
    
    
    find_packages = PackageFinder.find
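
    # A runnable sketch of the finder above (illustrative only; the package
    # layout is fabricated in a temporary directory):
    if __name__ == "__main__":
        import tempfile

        base = tempfile.mkdtemp()
        for pkg in ("mypkg", "mypkg/sub", "mypkg/tests"):
            pkg_dir = os.path.join(base, pkg)
            os.makedirs(pkg_dir)
            open(os.path.join(pkg_dir, "__init__.py"), "w").close()

        # Exclude patterns are fnmatch-style, so '*.tests' drops the
        # 'mypkg.tests' subpackage but keeps 'mypkg' and 'mypkg.sub'.
        print(find_packages(where=base, exclude=('*.tests',)))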
    
    
    def _install_setup_requires(attrs):
        # Note: do not use `setuptools.Distribution` directly, as
        # our PEP 517 backend patches `distutils.core.Distribution`.
        dist = distutils.core.Distribution(dict(
            (k, v) for k, v in attrs.items()
            if k in ('dependency_links', 'setup_requires')
        ))
        # Honor setup.cfg's options.
        dist.parse_config_files(ignore_option_errors=True)
        if dist.setup_requires:
            dist.fetch_build_eggs(dist.setup_requires)
    
    
    def setup(**attrs):
        # Make sure we have any requirements needed to interpret 'attrs'.
        _install_setup_requires(attrs)
        return distutils.core.setup(**attrs)
    
    setup.__doc__ = distutils.core.setup.__doc__
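
    # Typical use of the setup() wrapper above, from a project's setup.py (a
    # sketch; all project metadata here is made up):
    #
    #     from setuptools import setup, find_packages
    #
    #     setup(
    #         name="example-project",
    #         version="0.1",
    #         packages=find_packages(exclude=('*.tests',)),
    #         setup_requires=["some-build-dep"],  # fetched up front by
    #                                             # _install_setup_requires()
    #     )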
    
    
    _Command = monkey.get_unpatched(distutils.core.Command)
    
    
    class Command(_Command):
        __doc__ = _Command.__doc__
    
        command_consumes_arguments = False
    
        def __init__(self, dist, **kw):
            """
            Construct the command for dist, updating
            vars(self) with any keyword parameters.
            """
            _Command.__init__(self, dist)
            vars(self).update(kw)
    
        def reinitialize_command(self, command, reinit_subcommands=0, **kw):
            cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
            vars(cmd).update(kw)
            return cmd
    
    
    def _find_all_simple(path):
        """
        Find all files under 'path'
        """
        results = (
            os.path.join(base, file)
            for base, dirs, files in os.walk(path, followlinks=True)
            for file in files
        )
        return filter(os.path.isfile, results)
    
    
    def findall(dir=os.curdir):
        """
        Find all files under 'dir' and return the list of full filenames.
        Unless dir is '.', return full filenames with dir prepended.
        """
        files = _find_all_simple(dir)
        if dir == os.curdir:
            make_rel = functools.partial(os.path.relpath, start=dir)
            files = map(make_rel, files)
        return list(files)
    
    
    monkey.patch_all()
    usr/lib64/python2.7/ctypes/__init__.py
    """create and manipulate C data types in Python"""
    
    import os as _os, sys as _sys
    
    __version__ = "1.1.0"
    
    from _ctypes import Union, Structure, Array
    from _ctypes import _Pointer
    from _ctypes import CFuncPtr as _CFuncPtr
    from _ctypes import __version__ as _ctypes_version
    from _ctypes import RTLD_LOCAL, RTLD_GLOBAL
    from _ctypes import ArgumentError
    
    from struct import calcsize as _calcsize
    
    if __version__ != _ctypes_version:
        raise Exception("Version number mismatch", __version__, _ctypes_version)
    
    if _os.name in ("nt", "ce"):
        from _ctypes import FormatError
    
    DEFAULT_MODE = RTLD_LOCAL
    if _os.name == "posix" and _sys.platform == "darwin":
        # On OS X 10.3, we use RTLD_GLOBAL as default mode
        # because RTLD_LOCAL does not work at least on some
        # libraries.  OS X 10.3 is Darwin 7, so we check for
        # that.
    
        if int(_os.uname()[2].split('.')[0]) < 8:
            DEFAULT_MODE = RTLD_GLOBAL
    
    from _ctypes import FUNCFLAG_CDECL as _FUNCFLAG_CDECL, \
         FUNCFLAG_PYTHONAPI as _FUNCFLAG_PYTHONAPI, \
         FUNCFLAG_USE_ERRNO as _FUNCFLAG_USE_ERRNO, \
         FUNCFLAG_USE_LASTERROR as _FUNCFLAG_USE_LASTERROR
    
    """
    WINOLEAPI -> HRESULT
    WINOLEAPI_(type)
    
    STDMETHODCALLTYPE
    
    STDMETHOD(name)
    STDMETHOD_(type, name)
    
    STDAPICALLTYPE
    """
    
    def create_string_buffer(init, size=None):
        """create_string_buffer(aString) -> character array
        create_string_buffer(anInteger) -> character array
        create_string_buffer(aString, anInteger) -> character array
        """
        if isinstance(init, (str, unicode)):
            if size is None:
                size = len(init)+1
            buftype = c_char * size
            buf = buftype()
            buf.value = init
            return buf
        elif isinstance(init, (int, long)):
            buftype = c_char * init
            buf = buftype()
            return buf
        raise TypeError(init)
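
    # The three creation modes described in the docstring above, concretely
    # (an illustrative interpreter sketch, not part of the original module):
    #
    #     >>> buf = create_string_buffer("hello")   # sized from the string
    #     >>> len(buf), buf.value                   # trailing NUL included
    #     (6, 'hello')
    #     >>> buf = create_string_buffer(10)        # ten zero bytes
    #     >>> buf = create_string_buffer("hi", 8)   # explicit size
    #     >>> buf.value
    #     'hi'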
    
    def c_buffer(init, size=None):
    ##    "deprecated, use create_string_buffer instead"
    ##    import warnings
    ##    warnings.warn("c_buffer is deprecated, use create_string_buffer instead",
    ##                  DeprecationWarning, stacklevel=2)
        return create_string_buffer(init, size)
    
    _c_functype_cache = {}
    def CFUNCTYPE(restype, *argtypes, **kw):
        """CFUNCTYPE(restype, *argtypes,
                     use_errno=False, use_last_error=False) -> function prototype.
    
        restype: the result type
        argtypes: a sequence specifying the argument types
    
        The function prototype can be called in different ways to create a
        callable object:
    
        prototype(integer address) -> foreign function
        prototype(callable) -> create and return a C callable function from callable
        prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
        prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
        prototype((function name, dll object)[, paramflags]) -> foreign function exported by name
        """
        flags = _FUNCFLAG_CDECL
        if kw.pop("use_errno", False):
            flags |= _FUNCFLAG_USE_ERRNO
        if kw.pop("use_last_error", False):
            flags |= _FUNCFLAG_USE_LASTERROR
        if kw:
            raise ValueError("unexpected keyword argument(s) %s" % kw.keys())
        try:
            return _c_functype_cache[(restype, argtypes, flags)]
        except KeyError:
            class CFunctionType(_CFuncPtr):
                _argtypes_ = argtypes
                _restype_ = restype
                _flags_ = flags
            _c_functype_cache[(restype, argtypes, flags)] = CFunctionType
            return CFunctionType
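
    # The "prototype(callable)" form above is the most common one: it wraps a
    # Python function as a C callback.  An illustrative sketch using the
    # classic qsort example (assumes a POSIX libc named "libc.so.6"; CDLL,
    # POINTER, c_int and sizeof are all defined later in this module):
    #
    #     >>> libc = CDLL("libc.so.6")
    #     >>> CMPFUNC = CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
    #     >>> def py_cmp(a, b):
    #     ...     return a[0] - b[0]
    #     >>> arr = (c_int * 4)(4, 1, 3, 2)
    #     >>> libc.qsort(arr, len(arr), sizeof(c_int), CMPFUNC(py_cmp))
    #     >>> [i for i in arr]
    #     [1, 2, 3, 4]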
    
    if _os.name in ("nt", "ce"):
        from _ctypes import LoadLibrary as _dlopen
        from _ctypes import FUNCFLAG_STDCALL as _FUNCFLAG_STDCALL
        if _os.name == "ce":
            # 'ce' doesn't have the stdcall calling convention
            _FUNCFLAG_STDCALL = _FUNCFLAG_CDECL
    
        _win_functype_cache = {}
        def WINFUNCTYPE(restype, *argtypes, **kw):
            # docstring set later (very similar to CFUNCTYPE.__doc__)
            flags = _FUNCFLAG_STDCALL
            if kw.pop("use_errno", False):
                flags |= _FUNCFLAG_USE_ERRNO
            if kw.pop("use_last_error", False):
                flags |= _FUNCFLAG_USE_LASTERROR
            if kw:
                raise ValueError("unexpected keyword argument(s) %s" % kw.keys())
            try:
                return _win_functype_cache[(restype, argtypes, flags)]
            except KeyError:
                class WinFunctionType(_CFuncPtr):
                    _argtypes_ = argtypes
                    _restype_ = restype
                    _flags_ = flags
                _win_functype_cache[(restype, argtypes, flags)] = WinFunctionType
                return WinFunctionType
        if WINFUNCTYPE.__doc__:
            WINFUNCTYPE.__doc__ = CFUNCTYPE.__doc__.replace("CFUNCTYPE", "WINFUNCTYPE")
    
    elif _os.name == "posix":
        from _ctypes import dlopen as _dlopen
    
    from _ctypes import sizeof, byref, addressof, alignment, resize
    from _ctypes import get_errno, set_errno
    from _ctypes import _SimpleCData
    
    def _check_size(typ, typecode=None):
        # Check sizeof(ctypes_type) against struct.calcsize.  This
        # should protect somewhat against a misconfigured libffi.
        from struct import calcsize
        if typecode is None:
            # Most _type_ codes are the same as used in struct
            typecode = typ._type_
        actual, required = sizeof(typ), calcsize(typecode)
        if actual != required:
            raise SystemError("sizeof(%s) wrong: %d instead of %d" % \
                              (typ, actual, required))
    
    class py_object(_SimpleCData):
        _type_ = "O"
        def __repr__(self):
            try:
                return super(py_object, self).__repr__()
            except ValueError:
                return "%s()" % type(self).__name__
    _check_size(py_object, "P")
    
    class c_short(_SimpleCData):
        _type_ = "h"
    _check_size(c_short)
    
    class c_ushort(_SimpleCData):
        _type_ = "H"
    _check_size(c_ushort)
    
    class c_long(_SimpleCData):
        _type_ = "l"
    _check_size(c_long)
    
    class c_ulong(_SimpleCData):
        _type_ = "L"
    _check_size(c_ulong)
    
    if _calcsize("i") == _calcsize("l"):
        # if int and long have the same size, make c_int an alias for c_long
        c_int = c_long
        c_uint = c_ulong
    else:
        class c_int(_SimpleCData):
            _type_ = "i"
        _check_size(c_int)
    
        class c_uint(_SimpleCData):
            _type_ = "I"
        _check_size(c_uint)
    
    class c_float(_SimpleCData):
        _type_ = "f"
    _check_size(c_float)
    
    class c_double(_SimpleCData):
        _type_ = "d"
    _check_size(c_double)
    
    class c_longdouble(_SimpleCData):
        _type_ = "g"
    if sizeof(c_longdouble) == sizeof(c_double):
        c_longdouble = c_double
    
    if _calcsize("l") == _calcsize("q"):
        # if long and long long have the same size, make c_longlong an alias for c_long
        c_longlong = c_long
        c_ulonglong = c_ulong
    else:
        class c_longlong(_SimpleCData):
            _type_ = "q"
        _check_size(c_longlong)
    
        class c_ulonglong(_SimpleCData):
            _type_ = "Q"
        ##    def from_param(cls, val):
        ##        return ('d', float(val), val)
        ##    from_param = classmethod(from_param)
        _check_size(c_ulonglong)
    
    class c_ubyte(_SimpleCData):
        _type_ = "B"
    c_ubyte.__ctype_le__ = c_ubyte.__ctype_be__ = c_ubyte
    # backward compatibility:
    ##c_uchar = c_ubyte
    _check_size(c_ubyte)
    
    class c_byte(_SimpleCData):
        _type_ = "b"
    c_byte.__ctype_le__ = c_byte.__ctype_be__ = c_byte
    _check_size(c_byte)
    
    class c_char(_SimpleCData):
        _type_ = "c"
    c_char.__ctype_le__ = c_char.__ctype_be__ = c_char
    _check_size(c_char)
    
    class c_char_p(_SimpleCData):
        _type_ = "z"
        if _os.name == "nt":
            def __repr__(self):
                if not windll.kernel32.IsBadStringPtrA(self, -1):
                    return "%s(%r)" % (self.__class__.__name__, self.value)
                return "%s(%s)" % (self.__class__.__name__, cast(self, c_void_p).value)
        else:
            def __repr__(self):
                return "%s(%s)" % (self.__class__.__name__, cast(self, c_void_p).value)
    _check_size(c_char_p, "P")
    
    class c_void_p(_SimpleCData):
        _type_ = "P"
    c_voidp = c_void_p # backwards compatibility (to a bug)
    _check_size(c_void_p)
    
    class c_bool(_SimpleCData):
        _type_ = "?"
    
    from _ctypes import POINTER, pointer, _pointer_type_cache
    
    def _reset_cache():
        _pointer_type_cache.clear()
        _c_functype_cache.clear()
        if _os.name in ("nt", "ce"):
            _win_functype_cache.clear()
        # _SimpleCData.c_wchar_p_from_param
        POINTER(c_wchar).from_param = c_wchar_p.from_param
        # _SimpleCData.c_char_p_from_param
        POINTER(c_char).from_param = c_char_p.from_param
        _pointer_type_cache[None] = c_void_p
    
    try:
        from _ctypes import set_conversion_mode
    except ImportError:
        pass
    else:
        if _os.name in ("nt", "ce"):
            set_conversion_mode("mbcs", "ignore")
        else:
            set_conversion_mode("ascii", "strict")
    
        class c_wchar_p(_SimpleCData):
            _type_ = "Z"
    
        class c_wchar(_SimpleCData):
            _type_ = "u"
    
        def create_unicode_buffer(init, size=None):
            """create_unicode_buffer(aString) -> character array
            create_unicode_buffer(anInteger) -> character array
            create_unicode_buffer(aString, anInteger) -> character array
            """
            if isinstance(init, (str, unicode)):
                if size is None:
                    size = len(init)+1
                buftype = c_wchar * size
                buf = buftype()
                buf.value = init
                return buf
            elif isinstance(init, (int, long)):
                buftype = c_wchar * init
                buf = buftype()
                return buf
            raise TypeError(init)
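
# A minimal usage sketch for create_unicode_buffer(); the unicode/long
# checks above mark this as the Python 2 build, so text may be str or
# unicode.  Kept as comments so importing the module has no side effects:
#
#   buf = create_unicode_buffer(u"Hi")    # NUL-terminated: 3 wide chars
#   buf.value -> u'Hi';  len(buf) -> 3
#   raw = create_unicode_buffer(16)       # zero-initialized 16-char array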
    
    # XXX Deprecated
    def SetPointerType(pointer, cls):
        if _pointer_type_cache.get(cls, None) is not None:
            raise RuntimeError("This type already exists in the cache")
        if id(pointer) not in _pointer_type_cache:
            raise RuntimeError("What's this???")
        pointer.set_type(cls)
        _pointer_type_cache[cls] = pointer
        del _pointer_type_cache[id(pointer)]
    
    # XXX Deprecated
    def ARRAY(typ, len):
        return typ * len
    
    ################################################################
    
    
    class CDLL(object):
        """An instance of this class represents a loaded dll/shared
        library, exporting functions using the standard C calling
        convention (named 'cdecl' on Windows).
    
        The exported functions can be accessed as attributes, or by
        indexing with the function name.  Examples:
    
        .qsort -> callable object
        ['qsort'] -> callable object
    
        Calling the functions releases the Python GIL during the call and
        reacquires it afterwards.
        """
        _func_flags_ = _FUNCFLAG_CDECL
        _func_restype_ = c_int
        # default values for repr
        _name = ''
        _handle = 0
        _FuncPtr = None
    
        def __init__(self, name, mode=DEFAULT_MODE, handle=None,
                     use_errno=False,
                     use_last_error=False):
            self._name = name
            flags = self._func_flags_
            if use_errno:
                flags |= _FUNCFLAG_USE_ERRNO
            if use_last_error:
                flags |= _FUNCFLAG_USE_LASTERROR
    
            class _FuncPtr(_CFuncPtr):
                _flags_ = flags
                _restype_ = self._func_restype_
            self._FuncPtr = _FuncPtr
    
            if handle is None:
                self._handle = _dlopen(self._name, mode)
            else:
                self._handle = handle
    
        def __repr__(self):
            return "<%s '%s', handle %x at %x>" % \
                   (self.__class__.__name__, self._name,
                    (self._handle & (_sys.maxint*2 + 1)),
                    id(self) & (_sys.maxint*2 + 1))
    
        def __getattr__(self, name):
            if name.startswith('__') and name.endswith('__'):
                raise AttributeError(name)
            func = self.__getitem__(name)
            setattr(self, name, func)
            return func
    
        def __getitem__(self, name_or_ordinal):
            func = self._FuncPtr((name_or_ordinal, self))
            if not isinstance(name_or_ordinal, (int, long)):
                func.__name__ = name_or_ordinal
            return func
    
    class PyDLL(CDLL):
        """This class represents the Python library itself.  It allows
        accessing Python API functions.  The GIL is not released, and
        Python exceptions are handled correctly.
        """
        _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
    
    if _os.name in ("nt", "ce"):
    
        class WinDLL(CDLL):
            """This class represents a dll exporting functions using the
            Windows stdcall calling convention.
            """
            _func_flags_ = _FUNCFLAG_STDCALL
    
        # XXX Hm, what about HRESULT as normal parameter?
        # Mustn't it derive from c_long then?
        from _ctypes import _check_HRESULT, _SimpleCData
        class HRESULT(_SimpleCData):
            _type_ = "l"
            # _check_retval_ is called with the function's result when it
            # is used as restype.  It checks for the FAILED bit, and
            # raises a WindowsError if it is set.
            #
            # The _check_retval_ method is implemented in C, so that the
            # method definition itself is not included in the traceback
            # when it raises an error - that is what we want (and Python
            # doesn't have a way to raise an exception in the caller's
            # frame).
            _check_retval_ = _check_HRESULT
    
        class OleDLL(CDLL):
            """This class represents a dll exporting functions using the
            Windows stdcall calling convention, and returning HRESULT.
            HRESULT error values are automatically raised as WindowsError
            exceptions.
            """
            _func_flags_ = _FUNCFLAG_STDCALL
            _func_restype_ = HRESULT
    
    class LibraryLoader(object):
        def __init__(self, dlltype):
            self._dlltype = dlltype
    
        def __getattr__(self, name):
            if name[0] == '_':
                raise AttributeError(name)
            dll = self._dlltype(name)
            setattr(self, name, dll)
            return dll
    
        def __getitem__(self, name):
            return getattr(self, name)
    
        def LoadLibrary(self, name):
            return self._dlltype(name)
    
    cdll = LibraryLoader(CDLL)
    pydll = LibraryLoader(PyDLL)
    
    if _os.name in ("nt", "ce"):
        pythonapi = PyDLL("python dll", None, _sys.dllhandle)
    elif _sys.platform == "cygwin":
        pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2])
    else:
        pythonapi = PyDLL(None)
    
    
    if _os.name in ("nt", "ce"):
        windll = LibraryLoader(WinDLL)
        oledll = LibraryLoader(OleDLL)
    
        if _os.name == "nt":
            GetLastError = windll.kernel32.GetLastError
        else:
            GetLastError = windll.coredll.GetLastError
        from _ctypes import get_last_error, set_last_error
    
        def WinError(code=None, descr=None):
            if code is None:
                code = GetLastError()
            if descr is None:
                descr = FormatError(code).strip()
            return WindowsError(code, descr)
    
    if sizeof(c_uint) == sizeof(c_void_p):
        c_size_t = c_uint
        c_ssize_t = c_int
    elif sizeof(c_ulong) == sizeof(c_void_p):
        c_size_t = c_ulong
        c_ssize_t = c_long
    elif sizeof(c_ulonglong) == sizeof(c_void_p):
        c_size_t = c_ulonglong
        c_ssize_t = c_longlong
    
    # functions
    
    from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr
    
    ## void *memmove(void *, const void *, size_t);
    memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr)
    
    ## void *memset(void *, int, size_t)
    memset = CFUNCTYPE(c_void_p, c_void_p, c_int, c_size_t)(_memset_addr)
    
    def PYFUNCTYPE(restype, *argtypes):
        class CFunctionType(_CFuncPtr):
            _argtypes_ = argtypes
            _restype_ = restype
            _flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
        return CFunctionType
    
    _cast = PYFUNCTYPE(py_object, c_void_p, py_object, py_object)(_cast_addr)
    def cast(obj, typ):
        return _cast(obj, obj, typ)
    
    _string_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr)
    def string_at(ptr, size=-1):
        """string_at(addr[, size]) -> string
    
        Return the string at addr."""
        return _string_at(ptr, size)
    
    try:
        from _ctypes import _wstring_at_addr
    except ImportError:
        pass
    else:
        _wstring_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_wstring_at_addr)
        def wstring_at(ptr, size=-1):
            """wstring_at(addr[, size]) -> string
    
            Return the string at addr."""
            return _wstring_at(ptr, size)
    
    
    if _os.name in ("nt", "ce"): # COM stuff
        def DllGetClassObject(rclsid, riid, ppv):
            try:
                ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
            except ImportError:
                return -2147221231 # CLASS_E_CLASSNOTAVAILABLE
            else:
                return ccom.DllGetClassObject(rclsid, riid, ppv)
    
        def DllCanUnloadNow():
            try:
                ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
            except ImportError:
                return 0 # S_OK
            return ccom.DllCanUnloadNow()
    
    from ctypes._endian import BigEndianStructure, LittleEndianStructure
    
    # Fill in specifically-sized types
    c_int8 = c_byte
    c_uint8 = c_ubyte
    for kind in [c_short, c_int, c_long, c_longlong]:
        if sizeof(kind) == 2: c_int16 = kind
        elif sizeof(kind) == 4: c_int32 = kind
        elif sizeof(kind) == 8: c_int64 = kind
    for kind in [c_ushort, c_uint, c_ulong, c_ulonglong]:
        if sizeof(kind) == 2: c_uint16 = kind
        elif sizeof(kind) == 4: c_uint32 = kind
        elif sizeof(kind) == 8: c_uint64 = kind
    del(kind)
    
    _reset_cache()
# usr/lib64/python3.6/ctypes/__init__.py
"""create and manipulate C data types in Python"""
    
    import os as _os, sys as _sys
    
    __version__ = "1.1.0"
    
    from _ctypes import Union, Structure, Array
    from _ctypes import _Pointer
    from _ctypes import CFuncPtr as _CFuncPtr
    from _ctypes import __version__ as _ctypes_version
    from _ctypes import RTLD_LOCAL, RTLD_GLOBAL
    from _ctypes import ArgumentError
    
    from struct import calcsize as _calcsize
    
    if __version__ != _ctypes_version:
        raise Exception("Version number mismatch", __version__, _ctypes_version)
    
    if _os.name == "nt":
        from _ctypes import FormatError
    
    DEFAULT_MODE = RTLD_LOCAL
    if _os.name == "posix" and _sys.platform == "darwin":
        # On OS X 10.3, we use RTLD_GLOBAL as default mode
        # because RTLD_LOCAL does not work at least on some
        # libraries.  OS X 10.3 is Darwin 7, so we check for
        # that.
    
        if int(_os.uname().release.split('.')[0]) < 8:
            DEFAULT_MODE = RTLD_GLOBAL
    
    from _ctypes import FUNCFLAG_CDECL as _FUNCFLAG_CDECL, \
         FUNCFLAG_PYTHONAPI as _FUNCFLAG_PYTHONAPI, \
         FUNCFLAG_USE_ERRNO as _FUNCFLAG_USE_ERRNO, \
         FUNCFLAG_USE_LASTERROR as _FUNCFLAG_USE_LASTERROR
    
    # WINOLEAPI -> HRESULT
    # WINOLEAPI_(type)
    #
    # STDMETHODCALLTYPE
    #
    # STDMETHOD(name)
    # STDMETHOD_(type, name)
    #
    # STDAPICALLTYPE
    
    def create_string_buffer(init, size=None):
        """create_string_buffer(aBytes) -> character array
        create_string_buffer(anInteger) -> character array
        create_string_buffer(aBytes, anInteger) -> character array
        """
        if isinstance(init, bytes):
            if size is None:
                size = len(init)+1
            buftype = c_char * size
            buf = buftype()
            buf.value = init
            return buf
        elif isinstance(init, int):
            buftype = c_char * init
            buf = buftype()
            return buf
        raise TypeError(init)
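
# A minimal usage sketch of the three call forms documented above, kept as
# comments so importing this module stays side-effect free:
#
#   buf = create_string_buffer(b"Hello")    # NUL-terminated: sizeof == 6
#   buf.value -> b'Hello';  buf.raw -> b'Hello\x00'
#   raw = create_string_buffer(10)          # zero-initialized 10-byte array
#   two = create_string_buffer(b"Hi", 8)    # b'Hi' inside an 8-byte array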
    
    def c_buffer(init, size=None):
    ##    "deprecated, use create_string_buffer instead"
    ##    import warnings
    ##    warnings.warn("c_buffer is deprecated, use create_string_buffer instead",
    ##                  DeprecationWarning, stacklevel=2)
        return create_string_buffer(init, size)
    
    _c_functype_cache = {}
    def CFUNCTYPE(restype, *argtypes, **kw):
        """CFUNCTYPE(restype, *argtypes,
                     use_errno=False, use_last_error=False) -> function prototype.
    
        restype: the result type
        argtypes: a sequence specifying the argument types
    
        The function prototype can be called in different ways to create a
        callable object:
    
        prototype(integer address) -> foreign function
        prototype(callable) -> create and return a C callable function from callable
        prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
        prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
        prototype((function name, dll object)[, paramflags]) -> foreign function exported by name
        """
        flags = _FUNCFLAG_CDECL
        if kw.pop("use_errno", False):
            flags |= _FUNCFLAG_USE_ERRNO
        if kw.pop("use_last_error", False):
            flags |= _FUNCFLAG_USE_LASTERROR
        if kw:
            raise ValueError("unexpected keyword argument(s) %s" % kw.keys())
        try:
            return _c_functype_cache[(restype, argtypes, flags)]
        except KeyError:
            class CFunctionType(_CFuncPtr):
                _argtypes_ = argtypes
                _restype_ = restype
                _flags_ = flags
            _c_functype_cache[(restype, argtypes, flags)] = CFunctionType
            return CFunctionType
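
# A sketch of the prototype(callable) form from the docstring above -- the
# classic qsort-style comparator (POINTER is imported further down; the
# comparator use itself is only an illustration):
#
#   CMPFUNC = CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
#   def py_cmp(a, b):
#       return a[0] - b[0]
#   cmp_callback = CMPFUNC(py_cmp)   # usable where C expects int (*)(int*, int*)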
    
    if _os.name == "nt":
        from _ctypes import LoadLibrary as _dlopen
        from _ctypes import FUNCFLAG_STDCALL as _FUNCFLAG_STDCALL
    
        _win_functype_cache = {}
        def WINFUNCTYPE(restype, *argtypes, **kw):
            # docstring set later (very similar to CFUNCTYPE.__doc__)
            flags = _FUNCFLAG_STDCALL
            if kw.pop("use_errno", False):
                flags |= _FUNCFLAG_USE_ERRNO
            if kw.pop("use_last_error", False):
                flags |= _FUNCFLAG_USE_LASTERROR
            if kw:
                raise ValueError("unexpected keyword argument(s) %s" % kw.keys())
            try:
                return _win_functype_cache[(restype, argtypes, flags)]
            except KeyError:
                class WinFunctionType(_CFuncPtr):
                    _argtypes_ = argtypes
                    _restype_ = restype
                    _flags_ = flags
                _win_functype_cache[(restype, argtypes, flags)] = WinFunctionType
                return WinFunctionType
        if WINFUNCTYPE.__doc__:
            WINFUNCTYPE.__doc__ = CFUNCTYPE.__doc__.replace("CFUNCTYPE", "WINFUNCTYPE")
    
    elif _os.name == "posix":
        from _ctypes import dlopen as _dlopen
    
    from _ctypes import sizeof, byref, addressof, alignment, resize
    from _ctypes import get_errno, set_errno
    from _ctypes import _SimpleCData
    
    def _check_size(typ, typecode=None):
    # Check sizeof(ctypes_type) against struct.calcsize.  This
    # should protect somewhat against a misconfigured libffi.
        from struct import calcsize
        if typecode is None:
            # Most _type_ codes are the same as used in struct
            typecode = typ._type_
        actual, required = sizeof(typ), calcsize(typecode)
        if actual != required:
            raise SystemError("sizeof(%s) wrong: %d instead of %d" % \
                              (typ, actual, required))
    
    class py_object(_SimpleCData):
        _type_ = "O"
        def __repr__(self):
            try:
                return super().__repr__()
            except ValueError:
                return "%s()" % type(self).__name__
    _check_size(py_object, "P")
    
    class c_short(_SimpleCData):
        _type_ = "h"
    _check_size(c_short)
    
    class c_ushort(_SimpleCData):
        _type_ = "H"
    _check_size(c_ushort)
    
    class c_long(_SimpleCData):
        _type_ = "l"
    _check_size(c_long)
    
    class c_ulong(_SimpleCData):
        _type_ = "L"
    _check_size(c_ulong)
    
    if _calcsize("i") == _calcsize("l"):
        # if int and long have the same size, make c_int an alias for c_long
        c_int = c_long
        c_uint = c_ulong
    else:
        class c_int(_SimpleCData):
            _type_ = "i"
        _check_size(c_int)
    
        class c_uint(_SimpleCData):
            _type_ = "I"
        _check_size(c_uint)
    
    class c_float(_SimpleCData):
        _type_ = "f"
    _check_size(c_float)
    
    class c_double(_SimpleCData):
        _type_ = "d"
    _check_size(c_double)
    
    class c_longdouble(_SimpleCData):
        _type_ = "g"
    if sizeof(c_longdouble) == sizeof(c_double):
        c_longdouble = c_double
    
    if _calcsize("l") == _calcsize("q"):
        # if long and long long have the same size, make c_longlong an alias for c_long
        c_longlong = c_long
        c_ulonglong = c_ulong
    else:
        class c_longlong(_SimpleCData):
            _type_ = "q"
        _check_size(c_longlong)
    
        class c_ulonglong(_SimpleCData):
            _type_ = "Q"
        ##    def from_param(cls, val):
        ##        return ('d', float(val), val)
        ##    from_param = classmethod(from_param)
        _check_size(c_ulonglong)
    
    class c_ubyte(_SimpleCData):
        _type_ = "B"
    c_ubyte.__ctype_le__ = c_ubyte.__ctype_be__ = c_ubyte
    # backward compatibility:
    ##c_uchar = c_ubyte
    _check_size(c_ubyte)
    
    class c_byte(_SimpleCData):
        _type_ = "b"
    c_byte.__ctype_le__ = c_byte.__ctype_be__ = c_byte
    _check_size(c_byte)
    
    class c_char(_SimpleCData):
        _type_ = "c"
    c_char.__ctype_le__ = c_char.__ctype_be__ = c_char
    _check_size(c_char)
    
    class c_char_p(_SimpleCData):
        _type_ = "z"
        def __repr__(self):
            return "%s(%s)" % (self.__class__.__name__, c_void_p.from_buffer(self).value)
    _check_size(c_char_p, "P")
    
    class c_void_p(_SimpleCData):
        _type_ = "P"
    c_voidp = c_void_p # backwards compatibility (to a bug)
    _check_size(c_void_p)
    
    class c_bool(_SimpleCData):
        _type_ = "?"
    
    from _ctypes import POINTER, pointer, _pointer_type_cache
    
    class c_wchar_p(_SimpleCData):
        _type_ = "Z"
        def __repr__(self):
            return "%s(%s)" % (self.__class__.__name__, c_void_p.from_buffer(self).value)
    
    class c_wchar(_SimpleCData):
        _type_ = "u"
    
    def _reset_cache():
        _pointer_type_cache.clear()
        _c_functype_cache.clear()
        if _os.name == "nt":
            _win_functype_cache.clear()
        # _SimpleCData.c_wchar_p_from_param
        POINTER(c_wchar).from_param = c_wchar_p.from_param
        # _SimpleCData.c_char_p_from_param
        POINTER(c_char).from_param = c_char_p.from_param
        _pointer_type_cache[None] = c_void_p
    
    def create_unicode_buffer(init, size=None):
        """create_unicode_buffer(aString) -> character array
        create_unicode_buffer(anInteger) -> character array
        create_unicode_buffer(aString, anInteger) -> character array
        """
        if isinstance(init, str):
            if size is None:
                size = len(init)+1
            buftype = c_wchar * size
            buf = buftype()
            buf.value = init
            return buf
        elif isinstance(init, int):
            buftype = c_wchar * init
            buf = buftype()
            return buf
        raise TypeError(init)
    
    
    # XXX Deprecated
    def SetPointerType(pointer, cls):
        if _pointer_type_cache.get(cls, None) is not None:
            raise RuntimeError("This type already exists in the cache")
        if id(pointer) not in _pointer_type_cache:
            raise RuntimeError("What's this???")
        pointer.set_type(cls)
        _pointer_type_cache[cls] = pointer
        del _pointer_type_cache[id(pointer)]
    
    # XXX Deprecated
    def ARRAY(typ, len):
        return typ * len
    
    ################################################################
    
    
    class CDLL(object):
        """An instance of this class represents a loaded dll/shared
        library, exporting functions using the standard C calling
        convention (named 'cdecl' on Windows).
    
        The exported functions can be accessed as attributes, or by
        indexing with the function name.  Examples:
    
        .qsort -> callable object
        ['qsort'] -> callable object
    
        Calling the functions releases the Python GIL during the call and
        reacquires it afterwards.
        """
        _func_flags_ = _FUNCFLAG_CDECL
        _func_restype_ = c_int
        # default values for repr
        _name = ''
        _handle = 0
        _FuncPtr = None
    
        def __init__(self, name, mode=DEFAULT_MODE, handle=None,
                     use_errno=False,
                     use_last_error=False):
            self._name = name
            flags = self._func_flags_
            if use_errno:
                flags |= _FUNCFLAG_USE_ERRNO
            if use_last_error:
                flags |= _FUNCFLAG_USE_LASTERROR
    
            class _FuncPtr(_CFuncPtr):
                _flags_ = flags
                _restype_ = self._func_restype_
            self._FuncPtr = _FuncPtr
    
            if handle is None:
                self._handle = _dlopen(self._name, mode)
            else:
                self._handle = handle
    
        def __repr__(self):
            return "<%s '%s', handle %x at %#x>" % \
                   (self.__class__.__name__, self._name,
                    (self._handle & (_sys.maxsize*2 + 1)),
                    id(self) & (_sys.maxsize*2 + 1))
    
        def __getattr__(self, name):
            if name.startswith('__') and name.endswith('__'):
                raise AttributeError(name)
            func = self.__getitem__(name)
            setattr(self, name, func)
            return func
    
        def __getitem__(self, name_or_ordinal):
            func = self._FuncPtr((name_or_ordinal, self))
            if not isinstance(name_or_ordinal, int):
                func.__name__ = name_or_ordinal
            return func
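
# A commented sketch of the attribute/index access described in the CDLL
# docstring; the name "libc.so.6" is an assumption that only holds on
# glibc-based Linux, hence nothing is executed at import time:
#
#   libc = CDLL("libc.so.6")
#   libc.strlen.argtypes = [c_char_p]
#   libc.strlen.restype = c_size_t
#   libc.strlen(b"hello") -> 5
#   libc["strlen"]        -> a fresh function pointer for the same symbol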
    
    class PyDLL(CDLL):
        """This class represents the Python library itself.  It allows
        accessing Python API functions.  The GIL is not released, and
        Python exceptions are handled correctly.
        """
        _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
    
    if _os.name == "nt":
    
        class WinDLL(CDLL):
            """This class represents a dll exporting functions using the
            Windows stdcall calling convention.
            """
            _func_flags_ = _FUNCFLAG_STDCALL
    
        # XXX Hm, what about HRESULT as normal parameter?
        # Mustn't it derive from c_long then?
        from _ctypes import _check_HRESULT, _SimpleCData
        class HRESULT(_SimpleCData):
            _type_ = "l"
            # _check_retval_ is called with the function's result when it
            # is used as restype.  It checks for the FAILED bit, and
            # raises an OSError if it is set.
            #
            # The _check_retval_ method is implemented in C, so that the
            # method definition itself is not included in the traceback
            # when it raises an error - that is what we want (and Python
            # doesn't have a way to raise an exception in the caller's
            # frame).
            _check_retval_ = _check_HRESULT
    
        class OleDLL(CDLL):
            """This class represents a dll exporting functions using the
            Windows stdcall calling convention, and returning HRESULT.
            HRESULT error values are automatically raised as OSError
            exceptions.
            """
            _func_flags_ = _FUNCFLAG_STDCALL
            _func_restype_ = HRESULT
    
    class LibraryLoader(object):
        def __init__(self, dlltype):
            self._dlltype = dlltype
    
        def __getattr__(self, name):
            if name[0] == '_':
                raise AttributeError(name)
            dll = self._dlltype(name)
            setattr(self, name, dll)
            return dll
    
        def __getitem__(self, name):
            return getattr(self, name)
    
        def LoadLibrary(self, name):
            return self._dlltype(name)
    
    cdll = LibraryLoader(CDLL)
    pydll = LibraryLoader(PyDLL)
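
# Three ways to obtain a CDLL through a loader (a commented sketch;
# "libm.so.6" again assumes glibc Linux).  Only the attribute form caches
# the result on the loader:
#
#   cdll.LoadLibrary("libm.so.6")   # always constructs a fresh CDLL
#   cdll["libm.so.6"]               # __getitem__ delegates to getattr()
#   getattr(cdll, "libm.so.6")      # __getattr__ loads once, then caches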
    
    if _os.name == "nt":
        pythonapi = PyDLL("python dll", None, _sys.dllhandle)
    elif _sys.platform == "cygwin":
        pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2])
    else:
        pythonapi = PyDLL(None)
    
    
    if _os.name == "nt":
        windll = LibraryLoader(WinDLL)
        oledll = LibraryLoader(OleDLL)
    
        if _os.name == "nt":
            GetLastError = windll.kernel32.GetLastError
        else:
            GetLastError = windll.coredll.GetLastError
        from _ctypes import get_last_error, set_last_error
    
        def WinError(code=None, descr=None):
            if code is None:
                code = GetLastError()
            if descr is None:
                descr = FormatError(code).strip()
            return OSError(None, descr, None, code)
    
    if sizeof(c_uint) == sizeof(c_void_p):
        c_size_t = c_uint
        c_ssize_t = c_int
    elif sizeof(c_ulong) == sizeof(c_void_p):
        c_size_t = c_ulong
        c_ssize_t = c_long
    elif sizeof(c_ulonglong) == sizeof(c_void_p):
        c_size_t = c_ulonglong
        c_ssize_t = c_longlong
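
# Whichever branch above ran, the chosen aliases must match the platform
# pointer width exactly; this mirrors what _check_size() verifies for the
# other simple types.
assert sizeof(c_size_t) == sizeof(c_ssize_t) == sizeof(c_void_p)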
    
    # functions
    
    from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr
    
    ## void *memmove(void *, const void *, size_t);
    memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr)
    
    ## void *memset(void *, int, size_t)
    memset = CFUNCTYPE(c_void_p, c_void_p, c_int, c_size_t)(_memset_addr)
    
    def PYFUNCTYPE(restype, *argtypes):
        class CFunctionType(_CFuncPtr):
            _argtypes_ = argtypes
            _restype_ = restype
            _flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
        return CFunctionType
    
    _cast = PYFUNCTYPE(py_object, c_void_p, py_object, py_object)(_cast_addr)
    def cast(obj, typ):
        return _cast(obj, obj, typ)
    
    _string_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr)
    def string_at(ptr, size=-1):
        """string_at(addr[, size]) -> string
    
        Return the string at addr."""
        return _string_at(ptr, size)
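
# A commented sketch tying string_at() to the raw-memory helpers above:
#
#   dst = create_string_buffer(4)
#   memmove(dst, b"data", 4)
#   string_at(addressof(dst), 4) -> b'data'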
    
    try:
        from _ctypes import _wstring_at_addr
    except ImportError:
        pass
    else:
        _wstring_at = PYFUNCTYPE(py_object, c_void_p, c_int)(_wstring_at_addr)
        def wstring_at(ptr, size=-1):
            """wstring_at(addr[, size]) -> string
    
            Return the string at addr."""
            return _wstring_at(ptr, size)
    
    
    if _os.name == "nt": # COM stuff
        def DllGetClassObject(rclsid, riid, ppv):
            try:
                ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
            except ImportError:
                return -2147221231 # CLASS_E_CLASSNOTAVAILABLE
            else:
                return ccom.DllGetClassObject(rclsid, riid, ppv)
    
        def DllCanUnloadNow():
            try:
                ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
            except ImportError:
                return 0 # S_OK
            return ccom.DllCanUnloadNow()
    
    from ctypes._endian import BigEndianStructure, LittleEndianStructure
    
    # Fill in specifically-sized types
    c_int8 = c_byte
    c_uint8 = c_ubyte
    for kind in [c_short, c_int, c_long, c_longlong]:
        if sizeof(kind) == 2: c_int16 = kind
        elif sizeof(kind) == 4: c_int32 = kind
        elif sizeof(kind) == 8: c_int64 = kind
    for kind in [c_ushort, c_uint, c_ulong, c_ulonglong]:
        if sizeof(kind) == 2: c_uint16 = kind
        elif sizeof(kind) == 4: c_uint32 = kind
        elif sizeof(kind) == 8: c_uint64 = kind
    del(kind)
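
# After the loops, each fixed-width alias matches its advertised width,
# e.g. (commented sanity check):
#
#   sizeof(c_int32) -> 4;  sizeof(c_uint64) -> 8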
    
    _reset_cache()
# usr/lib64/python3.6/logging/__init__.py
# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
    #
    # Permission to use, copy, modify, and distribute this software and its
    # documentation for any purpose and without fee is hereby granted,
    # provided that the above copyright notice appear in all copies and that
    # both that copyright notice and this permission notice appear in
    # supporting documentation, and that the name of Vinay Sajip
    # not be used in advertising or publicity pertaining to distribution
    # of the software without specific, written prior permission.
    # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
    # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
    # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
    # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
    # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
    # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    
    """
    Logging package for Python. Based on PEP 282 and comments thereto in
    comp.lang.python.
    
    Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
    
    To use, simply 'import logging' and log away!
    """
    
    import sys, os, time, io, traceback, warnings, weakref, collections
    
    from string import Template
    
    __all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
               'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
               'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
               'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
               'captureWarnings', 'critical', 'debug', 'disable', 'error',
               'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
               'info', 'log', 'makeLogRecord', 'setLoggerClass', 'shutdown',
               'warn', 'warning', 'getLogRecordFactory', 'setLogRecordFactory',
               'lastResort', 'raiseExceptions']
    
    try:
        import threading
    except ImportError: #pragma: no cover
        threading = None
    
__author__  = "Vinay Sajip <vinay_sajip@red-dove.com>"
    __status__  = "production"
    # The following module attributes are no longer updated.
    __version__ = "0.5.1.2"
    __date__    = "07 February 2010"
    
    #---------------------------------------------------------------------------
    #   Miscellaneous module data
    #---------------------------------------------------------------------------
    
    #
    #_startTime is used as the base when calculating the relative time of events
    #
    _startTime = time.time()
    
    #
    #raiseExceptions is used to see if exceptions during handling should be
    #propagated
    #
    raiseExceptions = True
    
    #
    # If you don't want threading information in the log, set this to zero
    #
    logThreads = True
    
    #
    # If you don't want multiprocessing information in the log, set this to zero
    #
    logMultiprocessing = True
    
    #
    # If you don't want process information in the log, set this to zero
    #
    logProcesses = True
    
    #---------------------------------------------------------------------------
    #   Level related stuff
    #---------------------------------------------------------------------------
    #
    # Default levels and level names, these can be replaced with any positive set
    # of values having corresponding names. There is a pseudo-level, NOTSET, which
    # is only really there as a lower limit for user-defined levels. Handlers and
    # loggers are initialized with NOTSET so that they will log all messages, even
    # at user-defined levels.
    #
    
    CRITICAL = 50
    FATAL = CRITICAL
    ERROR = 40
    WARNING = 30
    WARN = WARNING
    INFO = 20
    DEBUG = 10
    NOTSET = 0
    
    _levelToName = {
        CRITICAL: 'CRITICAL',
        ERROR: 'ERROR',
        WARNING: 'WARNING',
        INFO: 'INFO',
        DEBUG: 'DEBUG',
        NOTSET: 'NOTSET',
    }
    _nameToLevel = {
        'CRITICAL': CRITICAL,
        'FATAL': FATAL,
        'ERROR': ERROR,
        'WARN': WARNING,
        'WARNING': WARNING,
        'INFO': INFO,
        'DEBUG': DEBUG,
        'NOTSET': NOTSET,
    }
    
    def getLevelName(level):
        """
        Return the textual representation of logging level 'level'.
    
        If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
        INFO, DEBUG) then you get the corresponding string. If you have
        associated levels with names using addLevelName then the name you have
        associated with 'level' is returned.
    
        If a numeric value corresponding to one of the defined levels is passed
        in, the corresponding string representation is returned.
    
        Otherwise, the string "Level %s" % level is returned.
        """
        # See Issues #22386, #27937 and #29220 for why it's this way
        result = _levelToName.get(level)
        if result is not None:
            return result
        result = _nameToLevel.get(level)
        if result is not None:
            return result
        return "Level %s" % level
    
    def addLevelName(level, levelName):
        """
        Associate 'levelName' with 'level'.
    
        This is used when converting levels to text during message formatting.
        """
        _acquireLock()
        try:    #unlikely to cause an exception, but you never know...
            _levelToName[level] = levelName
            _nameToLevel[levelName] = level
        finally:
            _releaseLock()
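
# A commented sketch (not executed, so the module-level tables stay
# untouched at import time); level 25 / 'NOTICE' are illustrative only:
#
#   addLevelName(25, 'NOTICE')
#   getLevelName(25)       -> 'NOTICE'
#   getLevelName('NOTICE') -> 25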
    
    if hasattr(sys, '_getframe'):
        currentframe = lambda: sys._getframe(3)
    else: #pragma: no cover
        def currentframe():
            """Return the frame object for the caller's stack frame."""
            try:
                raise Exception
            except Exception:
                return sys.exc_info()[2].tb_frame.f_back
    
    #
    # _srcfile is used when walking the stack to check when we've got the first
    # caller stack frame, by skipping frames whose filename is that of this
    # module's source. It therefore should contain the filename of this module's
    # source file.
    #
    # Ordinarily we would use __file__ for this, but frozen modules don't always
    # have __file__ set, for some reason (see Issue #21736). Thus, we get the
    # filename from a handy code object from a function defined in this module.
    # (There's no particular reason for picking addLevelName.)
    #
    
    _srcfile = os.path.normcase(addLevelName.__code__.co_filename)
    
    # _srcfile is only used in conjunction with sys._getframe().
    # To provide compatibility with older versions of Python, set _srcfile
    # to None if _getframe() is not available; this value will prevent
    # findCaller() from being called. You can also do this if you want to avoid
    # the overhead of fetching caller information, even when _getframe() is
    # available.
    #if not hasattr(sys, '_getframe'):
    #    _srcfile = None
    
    
    def _checkLevel(level):
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            if level not in _nameToLevel:
                raise ValueError("Unknown level: %r" % level)
            rv = _nameToLevel[level]
        else:
            raise TypeError("Level not an integer or a valid string: %r" % level)
        return rv
    
    #---------------------------------------------------------------------------
    #   Thread-related stuff
    #---------------------------------------------------------------------------
    
    #
    #_lock is used to serialize access to shared data structures in this module.
    #This needs to be an RLock because fileConfig() creates and configures
    #Handlers, and so might arbitrary user threads. Since Handler code updates the
    #shared dictionary _handlers, it needs to acquire the lock. But if configuring,
    #the lock would already have been acquired - so we need an RLock.
    #The same argument applies to Loggers and Manager.loggerDict.
    #
    if threading:
        _lock = threading.RLock()
    else: #pragma: no cover
        _lock = None
    
    
    def _acquireLock():
        """
        Acquire the module-level lock for serializing access to shared data.
    
        This should be released with _releaseLock().
        """
        if _lock:
            _lock.acquire()
    
    def _releaseLock():
        """
        Release the module-level lock acquired by calling _acquireLock().
        """
        if _lock:
            _lock.release()
    
    #---------------------------------------------------------------------------
    #   The logging record
    #---------------------------------------------------------------------------
    
    class LogRecord(object):
        """
        A LogRecord instance represents an event being logged.
    
        LogRecord instances are created every time something is logged. They
        contain all the information pertinent to the event being logged. The
        main information passed in is in msg and args, which are combined
        using str(msg) % args to create the message field of the record. The
        record also includes information such as when the record was created,
        the source line where the logging call was made, and any exception
        information to be logged.
        """
        def __init__(self, name, level, pathname, lineno,
                     msg, args, exc_info, func=None, sinfo=None, **kwargs):
            """
            Initialize a logging record with interesting information.
            """
            ct = time.time()
            self.name = name
            self.msg = msg
            #
            # The following statement allows passing of a dictionary as a sole
            # argument, so that you can do something like
            #  logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
            # Suggested by Stefan Behnel.
            # Note that without the test for args[0], we get a problem because
            # during formatting, we test to see if the arg is present using
            # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
            # and if the passed arg fails 'if self.args:' then no formatting
            # is done. For example, logger.warning('Value is %d', 0) would log
            # 'Value is %d' instead of 'Value is 0'.
            # For the use case of passing a dictionary, this should not be a
            # problem.
            # Issue #21172: a request was made to relax the isinstance check
            # to hasattr(args[0], '__getitem__'). However, the docs on string
            # formatting still seem to suggest a mapping object is required.
            # Thus, while not removing the isinstance check, it does now look
            # for collections.Mapping rather than, as before, dict.
            if (args and len(args) == 1 and isinstance(args[0], collections.Mapping)
                and args[0]):
                args = args[0]
            self.args = args
            self.levelname = getLevelName(level)
            self.levelno = level
            self.pathname = pathname
            try:
                self.filename = os.path.basename(pathname)
                self.module = os.path.splitext(self.filename)[0]
            except (TypeError, ValueError, AttributeError):
                self.filename = pathname
                self.module = "Unknown module"
            self.exc_info = exc_info
            self.exc_text = None      # used to cache the traceback text
            self.stack_info = sinfo
            self.lineno = lineno
            self.funcName = func
            self.created = ct
            self.msecs = (ct - int(ct)) * 1000
            self.relativeCreated = (self.created - _startTime) * 1000
            if logThreads and threading:
                self.thread = threading.get_ident()
                self.threadName = threading.current_thread().name
            else: # pragma: no cover
                self.thread = None
                self.threadName = None
            if not logMultiprocessing: # pragma: no cover
                self.processName = None
            else:
                self.processName = 'MainProcess'
                mp = sys.modules.get('multiprocessing')
                if mp is not None:
                    # Errors may occur if multiprocessing has not finished loading
                    # yet - e.g. if a custom import hook causes third-party code
                    # to run when multiprocessing calls import. See issue 8200
                    # for an example
                    try:
                        self.processName = mp.current_process().name
                    except Exception: #pragma: no cover
                        pass
            if logProcesses and hasattr(os, 'getpid'):
                self.process = os.getpid()
            else:
                self.process = None
    
        def __str__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">' % (self.name, self.levelno,
            self.pathname, self.lineno, self.msg)
    
        __repr__ = __str__
    
        def getMessage(self):
            """
            Return the message for this LogRecord.
    
            Return the message for this LogRecord after merging any user-supplied
            arguments with the message.
            """
            msg = str(self.msg)
            if self.args:
                msg = msg % self.args
            return msg
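
# A commented illustration of the dict-as-sole-argument handling in
# __init__ above:
#
#   r = LogRecord('demo', INFO, __file__, 42, "a %(a)d b %(b)s",
#                 ({'a': 1, 'b': 2},), None)
#   r.args           -> {'a': 1, 'b': 2}   (the 1-tuple was unwrapped)
#   r.getMessage()   -> 'a 1 b 2'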
    
    #
    #   Determine which class to use when instantiating log records.
    #
    _logRecordFactory = LogRecord
    
    def setLogRecordFactory(factory):
        """
        Set the factory to be used when instantiating a log record.
    
        :param factory: A callable which will be called to instantiate
        a log record.
        """
        global _logRecordFactory
        _logRecordFactory = factory
    
    def getLogRecordFactory():
        """
        Return the factory to be used when instantiating a log record.
        """
    
        return _logRecordFactory
    
    def makeLogRecord(dict):
        """
    Make a LogRecord whose attributes are defined by the specified dictionary.
        This function is useful for converting a logging event received over
        a socket connection (which is sent as a dictionary) into a LogRecord
        instance.
        """
        rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
        rv.__dict__.update(dict)
        return rv
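
# A commented round-trip sketch; the supplied dictionary simply overwrites
# the placeholder record's attributes:
#
#   d = {'name': 'net', 'levelno': INFO, 'levelname': 'INFO',
#        'msg': 'received %d bytes', 'args': (512,)}
#   makeLogRecord(d).getMessage() -> 'received 512 bytes'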
    
    #---------------------------------------------------------------------------
    #   Formatter classes and functions
    #---------------------------------------------------------------------------
    
    class PercentStyle(object):
    
        default_format = '%(message)s'
        asctime_format = '%(asctime)s'
        asctime_search = '%(asctime)'
    
        def __init__(self, fmt):
            self._fmt = fmt or self.default_format
    
        def usesTime(self):
            return self._fmt.find(self.asctime_search) >= 0
    
        def format(self, record):
            return self._fmt % record.__dict__
    
    class StrFormatStyle(PercentStyle):
        default_format = '{message}'
        asctime_format = '{asctime}'
        asctime_search = '{asctime'
    
        def format(self, record):
            return self._fmt.format(**record.__dict__)
    
    
    class StringTemplateStyle(PercentStyle):
        default_format = '${message}'
        asctime_format = '${asctime}'
        asctime_search = '${asctime}'
    
        def __init__(self, fmt):
            self._fmt = fmt or self.default_format
            self._tpl = Template(self._fmt)
    
        def usesTime(self):
            fmt = self._fmt
            return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
    
        def format(self, record):
            return self._tpl.substitute(**record.__dict__)
    
    BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
    
    _STYLES = {
        '%': (PercentStyle, BASIC_FORMAT),
        '{': (StrFormatStyle, '{levelname}:{name}:{message}'),
        '$': (StringTemplateStyle, '${levelname}:${name}:${message}'),
    }
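
# The three styles render the same record with different syntaxes; a
# commented sketch of what Formatter(fmt, style=...) selects:
#
#   '%' -> PercentStyle('%(levelname)s:%(name)s:%(message)s')
#   '{' -> StrFormatStyle('{levelname}:{name}:{message}')
#   '$' -> StringTemplateStyle('${levelname}:${name}:${message}')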
    
    class Formatter(object):
        """
        Formatter instances are used to convert a LogRecord to text.
    
        Formatters need to know how a LogRecord is constructed. They are
        responsible for converting a LogRecord to (usually) a string which can
        be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    style-dependent default value, "%(message)s", "{message}", or
        "${message}", is used.
    
        The Formatter can be initialized with a format string which makes use of
        knowledge of the LogRecord attributes - e.g. the default value mentioned
        above makes use of the fact that the user's message and arguments are pre-
        formatted into a LogRecord's message attribute. Currently, the useful
        attributes in a LogRecord are described by:
    
        %(name)s            Name of the logger (logging channel)
        %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                            WARNING, ERROR, CRITICAL)
        %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                            "WARNING", "ERROR", "CRITICAL")
        %(pathname)s        Full pathname of the source file where the logging
                            call was issued (if available)
        %(filename)s        Filename portion of pathname
        %(module)s          Module (name portion of filename)
        %(lineno)d          Source line number where the logging call was issued
                            (if available)
        %(funcName)s        Function name
        %(created)f         Time when the LogRecord was created (time.time()
                            return value)
        %(asctime)s         Textual time when the LogRecord was created
        %(msecs)d           Millisecond portion of the creation time
        %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                            relative to the time the logging module was loaded
                            (typically at application startup time)
        %(thread)d          Thread ID (if available)
        %(threadName)s      Thread name (if available)
        %(process)d         Process ID (if available)
        %(message)s         The result of record.getMessage(), computed just as
                            the record is emitted
        """
    
        converter = time.localtime
    
        def __init__(self, fmt=None, datefmt=None, style='%'):
            """
            Initialize the formatter with specified format strings.
    
            Initialize the formatter either with the specified format string, or a
            default as described above. Allow for specialized date formatting with
            the optional datefmt argument. If datefmt is omitted, you get an
            ISO8601-like (or RFC 3339-like) format.
    
            Use a style parameter of '%', '{' or '$' to specify that you want to
            use one of %-formatting, :meth:`str.format` (``{}``) formatting or
            :class:`string.Template` formatting in your format string.
    
            .. versionchanged:: 3.2
               Added the ``style`` parameter.
            """
            if style not in _STYLES:
                raise ValueError('Style must be one of: %s' % ','.join(
                                 _STYLES.keys()))
            self._style = _STYLES[style][0](fmt)
            self._fmt = self._style._fmt
            self.datefmt = datefmt
    
        default_time_format = '%Y-%m-%d %H:%M:%S'
        default_msec_format = '%s,%03d'
    
        def formatTime(self, record, datefmt=None):
            """
            Return the creation time of the specified LogRecord as formatted text.
    
            This method should be called from format() by a formatter which
            wants to make use of a formatted time. This method can be overridden
            in formatters to provide for any specific requirement, but the
            basic behaviour is as follows: if datefmt (a string) is specified,
            it is used with time.strftime() to format the creation time of the
            record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used.
            The resulting string is returned. This function uses a user-configurable
            function to convert the creation time to a tuple. By default,
            time.localtime() is used; to change this for a particular formatter
            instance, set the 'converter' attribute to a function with the same
            signature as time.localtime() or time.gmtime(). To change it for all
            formatters, for example if you want all logging times to be shown in GMT,
            set the 'converter' attribute in the Formatter class.
            """
            ct = self.converter(record.created)
            if datefmt:
                s = time.strftime(datefmt, ct)
            else:
                t = time.strftime(self.default_time_format, ct)
                s = self.default_msec_format % (t, record.msecs)
            return s
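
    # As the docstring notes, 'converter' is swappable; a commented sketch
    # of forcing UTC timestamps for a single formatter instance:
    #
    #   f = Formatter('%(asctime)s %(message)s')
    #   f.converter = time.gmtime   # same signature as time.localtime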
    
        def formatException(self, ei):
            """
            Format and return the specified exception information as a string.
    
            This default implementation just uses
            traceback.print_exception()
            """
            sio = io.StringIO()
            tb = ei[2]
            # See issues #9427, #1553375. Commented out for now.
            #if getattr(self, 'fullstack', False):
            #    traceback.print_stack(tb.tb_frame.f_back, file=sio)
            traceback.print_exception(ei[0], ei[1], tb, None, sio)
            s = sio.getvalue()
            sio.close()
            if s[-1:] == "\n":
                s = s[:-1]
            return s
    
        def usesTime(self):
            """
            Check if the format uses the creation time of the record.
            """
            return self._style.usesTime()
    
        def formatMessage(self, record):
            return self._style.format(record)
    
        def formatStack(self, stack_info):
            """
            This method is provided as an extension point for specialized
            formatting of stack information.
    
            The input data is a string as returned from a call to
            :func:`traceback.print_stack`, but with the last trailing newline
            removed.
    
            The base implementation just returns the value passed in.
            """
            return stack_info
    
        def format(self, record):
            """
            Format the specified record as text.
    
            The record's attribute dictionary is used as the operand to a
            string formatting operation which yields the returned string.
            Before formatting the dictionary, a couple of preparatory steps
            are carried out. The message attribute of the record is computed
            using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
            called to format the event time. If there is exception information,
            it is formatted using formatException() and appended to the message.
            """
            record.message = record.getMessage()
            if self.usesTime():
                record.asctime = self.formatTime(record, self.datefmt)
            s = self.formatMessage(record)
            if record.exc_info:
                # Cache the traceback text to avoid converting it multiple times
                # (it's constant anyway)
                if not record.exc_text:
                    record.exc_text = self.formatException(record.exc_info)
            if record.exc_text:
                if s[-1:] != "\n":
                    s = s + "\n"
                s = s + record.exc_text
            if record.stack_info:
                if s[-1:] != "\n":
                    s = s + "\n"
                s = s + self.formatStack(record.stack_info)
            return s
    
    #
    #   The default formatter to use when no other is specified
    #
    _defaultFormatter = Formatter()
    
    class BufferingFormatter(object):
        """
        A formatter suitable for formatting a number of records.
        """
        def __init__(self, linefmt=None):
            """
            Optionally specify a formatter which will be used to format each
            individual record.
            """
            if linefmt:
                self.linefmt = linefmt
            else:
                self.linefmt = _defaultFormatter
    
        def formatHeader(self, records):
            """
            Return the header string for the specified records.
            """
            return ""
    
        def formatFooter(self, records):
            """
            Return the footer string for the specified records.
            """
            return ""
    
        def format(self, records):
            """
            Format the specified records and return the result as a string.
            """
            rv = ""
            if len(records) > 0:
                rv = rv + self.formatHeader(records)
                for record in records:
                    rv = rv + self.linefmt.format(record)
                rv = rv + self.formatFooter(records)
            return rv
    
    #---------------------------------------------------------------------------
    #   Filter classes and functions
    #---------------------------------------------------------------------------
    
    class Filter(object):
        """
        Filter instances are used to perform arbitrary filtering of LogRecords.
    
        Loggers and Handlers can optionally use Filter instances to filter
        records as desired. The base filter class only allows events which are
        below a certain point in the logger hierarchy. For example, a filter
        initialized with "A.B" will allow events logged by loggers "A.B",
        "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
        initialized with the empty string, all events are passed.
        """
        def __init__(self, name=''):
            """
            Initialize a filter.
    
            Initialize with the name of the logger which, together with its
            children, will have its events allowed through the filter. If no
            name is specified, allow every event.
            """
            self.name = name
            self.nlen = len(name)
    
        def filter(self, record):
            """
            Determine if the specified record is to be logged.
    
            Is the specified record to be logged? Returns 0 for no, nonzero for
            yes. If deemed appropriate, the record may be modified in-place.
            """
            if self.nlen == 0:
                return True
            elif self.name == record.name:
                return True
            elif record.name.find(self.name, 0, self.nlen) != 0:
                return False
            return (record.name[self.nlen] == ".")
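
    # Illustrative sketch of the name-based filtering described above:
    #
    #   h = StreamHandler()
    #   h.addFilter(Filter('app.db'))
    #   # records from loggers 'app.db' and 'app.db.engine' pass this filter;
    #   # records from 'app.dbx' or 'app.net' are dropped by the handler.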
    
    class Filterer(object):
        """
        A base class for loggers and handlers which allows them to share
        common code.
        """
        def __init__(self):
            """
            Initialize the list of filters to be an empty list.
            """
            self.filters = []
    
        def addFilter(self, filter):
            """
            Add the specified filter to this handler.
            """
            if not (filter in self.filters):
                self.filters.append(filter)
    
        def removeFilter(self, filter):
            """
            Remove the specified filter from this handler.
            """
            if filter in self.filters:
                self.filters.remove(filter)
    
        def filter(self, record):
            """
            Determine if a record is loggable by consulting all the filters.
    
            The default is to allow the record to be logged; any filter can veto
            this, in which case the record is dropped. Returns False if the
            record is to be dropped, else True.
    
            .. versionchanged:: 3.2
    
               Allow filters to be just callables.
            """
            rv = True
            for f in self.filters:
                if hasattr(f, 'filter'):
                    result = f.filter(record)
                else:
                    result = f(record) # assume callable - will raise if not
                if not result:
                    rv = False
                    break
            return rv
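
    # Illustrative sketch: since Python 3.2 a plain callable also works as a
    # filter, so ad-hoc filtering does not need a Filter subclass:
    #
    #   logger.addFilter(lambda record: record.levelno != WARNING)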
    
    #---------------------------------------------------------------------------
    #   Handler classes and functions
    #---------------------------------------------------------------------------
    
    _handlers = weakref.WeakValueDictionary()  #map of handler names to handlers
    _handlerList = [] # added to allow handlers to be removed in reverse of order initialized
    
    def _removeHandlerRef(wr):
        """
        Remove a handler reference from the internal cleanup list.
        """
        # This function can be called during module teardown, when globals are
        # set to None. It can also be called from another thread. So we need to
        # pre-emptively grab the necessary globals and check if they're None,
        # to prevent race conditions and failures during interpreter shutdown.
        acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
        if acquire and release and handlers:
            acquire()
            try:
                if wr in handlers:
                    handlers.remove(wr)
            finally:
                release()
    
    def _addHandlerRef(handler):
        """
        Add a handler to the internal cleanup list using a weak reference.
        """
        _acquireLock()
        try:
            _handlerList.append(weakref.ref(handler, _removeHandlerRef))
        finally:
            _releaseLock()
    
    class Handler(Filterer):
        """
        Handler instances dispatch logging events to specific destinations.
    
        The base handler class. Acts as a placeholder which defines the Handler
        interface. Handlers can optionally use Formatter instances to format
        records as desired. By default, no formatter is specified; in this case,
        the 'raw' message as determined by record.message is logged.
        """
        def __init__(self, level=NOTSET):
            """
            Initializes the instance - basically setting the formatter to None
            and the filter list to empty.
            """
            Filterer.__init__(self)
            self._name = None
            self.level = _checkLevel(level)
            self.formatter = None
            # Add the handler to the global _handlerList (for cleanup on shutdown)
            _addHandlerRef(self)
            self.createLock()
    
        def get_name(self):
            return self._name
    
        def set_name(self, name):
            _acquireLock()
            try:
                if self._name in _handlers:
                    del _handlers[self._name]
                self._name = name
                if name:
                    _handlers[name] = self
            finally:
                _releaseLock()
    
        name = property(get_name, set_name)
    
        def createLock(self):
            """
            Acquire a thread lock for serializing access to the underlying I/O.
            """
            if threading:
                self.lock = threading.RLock()
            else: #pragma: no cover
                self.lock = None
    
        def acquire(self):
            """
            Acquire the I/O thread lock.
            """
            if self.lock:
                self.lock.acquire()
    
        def release(self):
            """
            Release the I/O thread lock.
            """
            if self.lock:
                self.lock.release()
    
        def setLevel(self, level):
            """
            Set the logging level of this handler.  level must be an int or a str.
            """
            self.level = _checkLevel(level)
    
        def format(self, record):
            """
            Format the specified record.
    
            If a formatter is set, use it. Otherwise, use the default formatter
            for the module.
            """
            if self.formatter:
                fmt = self.formatter
            else:
                fmt = _defaultFormatter
            return fmt.format(record)
    
        def emit(self, record):
            """
            Do whatever it takes to actually log the specified logging record.
    
            This version is intended to be implemented by subclasses and so
            raises a NotImplementedError.
            """
            raise NotImplementedError('emit must be implemented '
                                      'by Handler subclasses')
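
        # Illustrative sketch: a minimal subclass only needs to override
        # emit() (a hypothetical in-memory handler):
        #
        #   class ListHandler(Handler):
        #       def __init__(self):
        #           super().__init__()
        #           self.records = []
        #       def emit(self, record):
        #           self.records.append(self.format(record))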
    
        def handle(self, record):
            """
            Conditionally emit the specified logging record.
    
            Emission depends on filters which may have been added to the handler.
            Wrap the actual emission of the record with acquisition/release of
            the I/O thread lock. Returns whether the filter passed the record for
            emission.
            """
            rv = self.filter(record)
            if rv:
                self.acquire()
                try:
                    self.emit(record)
                finally:
                    self.release()
            return rv
    
        def setFormatter(self, fmt):
            """
            Set the formatter for this handler.
            """
            self.formatter = fmt
    
        def flush(self):
            """
            Ensure all logging output has been flushed.
    
            This version does nothing and is intended to be implemented by
            subclasses.
            """
            pass
    
        def close(self):
            """
            Tidy up any resources used by the handler.
    
            This version removes the handler from an internal map of handlers,
            _handlers, which is used for handler lookup by name. Subclasses
            should ensure that this gets called from overridden close()
            methods.
            """
            #get the module data lock, as we're updating a shared structure.
            _acquireLock()
            try:    #unlikely to raise an exception, but you never know...
                if self._name and self._name in _handlers:
                    del _handlers[self._name]
            finally:
                _releaseLock()
    
        def handleError(self, record):
            """
            Handle errors which occur during an emit() call.
    
            This method should be called from handlers when an exception is
            encountered during an emit() call. If raiseExceptions is false,
            exceptions get silently ignored. This is usually what is wanted
            for a logging system - most users will not care about errors in
            the logging system; they are more interested in application errors.
            You could, however, replace this with a custom handler if you wish.
            The record which was being processed is passed in to this method.
            """
            if raiseExceptions and sys.stderr:  # see issue 13807
                t, v, tb = sys.exc_info()
                try:
                    sys.stderr.write('--- Logging error ---\n')
                    traceback.print_exception(t, v, tb, None, sys.stderr)
                    sys.stderr.write('Call stack:\n')
                    # Walk the stack frame up until we're out of logging,
                    # so as to print the calling context.
                    frame = tb.tb_frame
                    while (frame and os.path.dirname(frame.f_code.co_filename) ==
                           __path__[0]):
                        frame = frame.f_back
                    if frame:
                        traceback.print_stack(frame, file=sys.stderr)
                    else:
                        # couldn't find the right stack frame, for some reason
                        sys.stderr.write('Logged from file %s, line %s\n' % (
                                         record.filename, record.lineno))
                    # Issue 18671: output logging message and arguments
                    try:
                        sys.stderr.write('Message: %r\n'
                                         'Arguments: %s\n' % (record.msg,
                                                              record.args))
                    except Exception:
                        sys.stderr.write('Unable to print the message and arguments'
                                         ' - possible formatting error.\nUse the'
                                         ' traceback above to help find the error.\n'
                                        )
                except OSError: #pragma: no cover
                    pass    # see issue 5971
                finally:
                    del t, v, tb
    
        def __repr__(self):
            level = getLevelName(self.level)
            return '<%s (%s)>' % (self.__class__.__name__, level)
    
    class StreamHandler(Handler):
        """
        A handler class which writes logging records, appropriately formatted,
        to a stream. Note that this class does not close the stream, as
        sys.stdout or sys.stderr may be used.
        """
    
        terminator = '\n'
    
        def __init__(self, stream=None):
            """
            Initialize the handler.
    
            If stream is not specified, sys.stderr is used.
            """
            Handler.__init__(self)
            if stream is None:
                stream = sys.stderr
            self.stream = stream
    
        def flush(self):
            """
            Flushes the stream.
            """
            self.acquire()
            try:
                if self.stream and hasattr(self.stream, "flush"):
                    self.stream.flush()
            finally:
                self.release()
    
        def emit(self, record):
            """
            Emit a record.
    
            If a formatter is specified, it is used to format the record.
            The record is then written to the stream with a trailing newline.  If
            exception information is present, it is formatted using
            traceback.print_exception and appended to the stream.  If the stream
            has an 'encoding' attribute, it is used to determine how to do the
            output to the stream.
            """
            try:
                msg = self.format(record)
                stream = self.stream
                stream.write(msg)
                stream.write(self.terminator)
                self.flush()
            except Exception:
                self.handleError(record)
    
        def __repr__(self):
            level = getLevelName(self.level)
            name = getattr(self.stream, 'name', '')
            if name:
                name += ' '
            return '<%s %s(%s)>' % (self.__class__.__name__, name, level)
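
    # Illustrative usage sketch: send WARNING and above to sys.stderr using
    # the module's BASIC_FORMAT layout:
    #
    #   h = StreamHandler()                 # stream defaults to sys.stderr
    #   h.setLevel(WARNING)
    #   h.setFormatter(Formatter(BASIC_FORMAT))
    #   getLogger().addHandler(h)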
    
    
    class FileHandler(StreamHandler):
        """
        A handler class which writes formatted logging records to disk files.
        """
        def __init__(self, filename, mode='a', encoding=None, delay=False):
            """
            Open the specified file and use it as the stream for logging.
            """
            # Issue #27493: add support for Path objects to be passed in
            filename = os.fspath(filename)
            #keep the absolute path, otherwise derived classes which use this
            #may come a cropper when the current directory changes
            self.baseFilename = os.path.abspath(filename)
            self.mode = mode
            self.encoding = encoding
            self.delay = delay
            if delay:
                #We don't open the stream, but we still need to call the
                #Handler constructor to set level, formatter, lock etc.
                Handler.__init__(self)
                self.stream = None
            else:
                StreamHandler.__init__(self, self._open())
    
        def close(self):
            """
            Closes the stream.
            """
            self.acquire()
            try:
                try:
                    if self.stream:
                        try:
                            self.flush()
                        finally:
                            stream = self.stream
                            self.stream = None
                            if hasattr(stream, "close"):
                                stream.close()
                finally:
                    # Issue #19523: call unconditionally to
                    # prevent a handler leak when delay is set
                    StreamHandler.close(self)
            finally:
                self.release()
    
        def _open(self):
            """
            Open the current base file with the (original) mode and encoding.
            Return the resulting stream.
            """
            return open(self.baseFilename, self.mode, encoding=self.encoding)
    
        def emit(self, record):
            """
            Emit a record.
    
            If the stream was not opened because 'delay' was specified in the
            constructor, open it before calling the superclass's emit.
            """
            if self.stream is None:
                self.stream = self._open()
            StreamHandler.emit(self, record)
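
        # Illustrative: with delay=True the file is opened lazily, on the
        # first emitted record instead of in the constructor:
        #
        #   fh = FileHandler('app.log', delay=True)  # hypothetical filename
        #   # nothing is created on disk until fh first handles a record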
    
        def __repr__(self):
            level = getLevelName(self.level)
            return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level)
    
    
    class _StderrHandler(StreamHandler):
        """
        This class is like a StreamHandler using sys.stderr, but always uses
        whatever sys.stderr is currently set to rather than the value of
        sys.stderr at handler construction time.
        """
        def __init__(self, level=NOTSET):
            """
            Initialize the handler.
            """
            Handler.__init__(self, level)
    
        @property
        def stream(self):
            return sys.stderr
    
    
    _defaultLastResort = _StderrHandler(WARNING)
    lastResort = _defaultLastResort
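
    # Illustrative: thanks to 'lastResort', a completely unconfigured program
    # still sees WARNING and above on sys.stderr:
    #
    #   getLogger('bare').warning('no handlers anywhere')  # printed to stderr
    #
    # Setting lastResort = None restores the one-off "no handlers" complaint.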
    
    #---------------------------------------------------------------------------
    #   Manager classes and functions
    #---------------------------------------------------------------------------
    
    class PlaceHolder(object):
        """
        PlaceHolder instances are used in the Manager logger hierarchy to take
        the place of nodes for which no loggers have been defined. This class is
        intended for internal use only and not as part of the public API.
        """
        def __init__(self, alogger):
            """
            Initialize with the specified logger being a child of this placeholder.
            """
            self.loggerMap = { alogger : None }
    
        def append(self, alogger):
            """
            Add the specified logger as a child of this placeholder.
            """
            if alogger not in self.loggerMap:
                self.loggerMap[alogger] = None
    
    #
    #   Determine which class to use when instantiating loggers.
    #
    
    def setLoggerClass(klass):
        """
        Set the class to be used when instantiating a logger. The class should
        define __init__() such that only a name argument is required, and the
        __init__() should call Logger.__init__()
        """
        if klass != Logger:
            if not issubclass(klass, Logger):
                raise TypeError("logger not derived from logging.Logger: "
                                + klass.__name__)
        global _loggerClass
        _loggerClass = klass
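
    # Illustrative sketch: installing a custom logger class (subclass name and
    # method are hypothetical):
    #
    #   class AuditLogger(Logger):
    #       def audit(self, msg, *args, **kwargs):
    #           self.log(INFO, 'AUDIT: ' + msg, *args, **kwargs)
    #
    #   setLoggerClass(AuditLogger)
    #   getLogger('billing').audit('account created')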
    
    def getLoggerClass():
        """
        Return the class to be used when instantiating a logger.
        """
        return _loggerClass
    
    class Manager(object):
        """
        There is [under normal circumstances] just one Manager instance, which
        holds the hierarchy of loggers.
        """
        def __init__(self, rootnode):
            """
            Initialize the manager with the root node of the logger hierarchy.
            """
            self.root = rootnode
            self.disable = 0
            self.emittedNoHandlerWarning = False
            self.loggerDict = {}
            self.loggerClass = None
            self.logRecordFactory = None
    
        def getLogger(self, name):
            """
            Get a logger with the specified name (channel name), creating it
            if it doesn't yet exist. This name is a dot-separated hierarchical
            name, such as "a", "a.b", "a.b.c" or similar.
    
            If a PlaceHolder existed for the specified name [i.e. the logger
            didn't exist but a child of it did], replace it with the created
            logger and fix up the parent/child references which pointed to the
            placeholder to now point to the logger.
            """
            rv = None
            if not isinstance(name, str):
                raise TypeError('A logger name must be a string')
            _acquireLock()
            try:
                if name in self.loggerDict:
                    rv = self.loggerDict[name]
                    if isinstance(rv, PlaceHolder):
                        ph = rv
                        rv = (self.loggerClass or _loggerClass)(name)
                        rv.manager = self
                        self.loggerDict[name] = rv
                        self._fixupChildren(ph, rv)
                        self._fixupParents(rv)
                else:
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupParents(rv)
            finally:
                _releaseLock()
            return rv
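
        # Illustrative: requesting getLogger('a.b.c') before 'a.b' exists
        # leaves a PlaceHolder at 'a.b'; a later getLogger('a.b') replaces the
        # placeholder and re-parents 'a.b.c' via _fixupChildren()/_fixupParents().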
    
        def setLoggerClass(self, klass):
            """
            Set the class to be used when instantiating a logger with this Manager.
            """
            if klass != Logger:
                if not issubclass(klass, Logger):
                    raise TypeError("logger not derived from logging.Logger: "
                                    + klass.__name__)
            self.loggerClass = klass
    
        def setLogRecordFactory(self, factory):
            """
            Set the factory to be used when instantiating a log record with this
            Manager.
            """
            self.logRecordFactory = factory
    
        def _fixupParents(self, alogger):
            """
            Ensure that there are either loggers or placeholders all the way
            from the specified logger to the root of the logger hierarchy.
            """
            name = alogger.name
            i = name.rfind(".")
            rv = None
            while (i > 0) and not rv:
                substr = name[:i]
                if substr not in self.loggerDict:
                    self.loggerDict[substr] = PlaceHolder(alogger)
                else:
                    obj = self.loggerDict[substr]
                    if isinstance(obj, Logger):
                        rv = obj
                    else:
                        assert isinstance(obj, PlaceHolder)
                        obj.append(alogger)
                i = name.rfind(".", 0, i - 1)
            if not rv:
                rv = self.root
            alogger.parent = rv
    
        def _fixupChildren(self, ph, alogger):
            """
            Ensure that children of the placeholder ph are connected to the
            specified logger.
            """
            name = alogger.name
            namelen = len(name)
            for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(name)
                if c.parent.name[:namelen] != name:
                    alogger.parent = c.parent
                    c.parent = alogger
    
    #---------------------------------------------------------------------------
    #   Logger classes and functions
    #---------------------------------------------------------------------------
    
    class Logger(Filterer):
        """
        Instances of the Logger class represent a single logging channel. A
        "logging channel" indicates an area of an application. Exactly how an
        "area" is defined is up to the application developer. Since an
        application can have any number of areas, logging channels are identified
        by a unique string. Application areas can be nested (e.g. an area
        of "input processing" might include sub-areas "read CSV files", "read
        XLS files" and "read Gnumeric files"). To cater for this natural nesting,
        channel names are organized into a namespace hierarchy where levels are
        separated by periods, much like the Java or Python package namespace. So
        in the instance given above, channel names might be "input" for the upper
        level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
        There is no arbitrary limit to the depth of nesting.
        """
        def __init__(self, name, level=NOTSET):
            """
            Initialize the logger with a name and an optional level.
            """
            Filterer.__init__(self)
            self.name = name
            self.level = _checkLevel(level)
            self.parent = None
            self.propagate = True
            self.handlers = []
            self.disabled = False
    
        def setLevel(self, level):
            """
            Set the logging level of this logger.  level must be an int or a str.
            """
            self.level = _checkLevel(level)
    
        def debug(self, msg, *args, **kwargs):
            """
            Log 'msg % args' with severity 'DEBUG'.
    
            To pass exception information, use the keyword argument exc_info with
            a true value, e.g.
    
            logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
            """
            if self.isEnabledFor(DEBUG):
                self._log(DEBUG, msg, args, **kwargs)
    
        def info(self, msg, *args, **kwargs):
            """
            Log 'msg % args' with severity 'INFO'.
    
            To pass exception information, use the keyword argument exc_info with
            a true value, e.g.
    
            logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
            """
            if self.isEnabledFor(INFO):
                self._log(INFO, msg, args, **kwargs)
    
        def warning(self, msg, *args, **kwargs):
            """
            Log 'msg % args' with severity 'WARNING'.
    
            To pass exception information, use the keyword argument exc_info with
            a true value, e.g.
    
            logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
            """
            if self.isEnabledFor(WARNING):
                self._log(WARNING, msg, args, **kwargs)
    
        def warn(self, msg, *args, **kwargs):
            warnings.warn("The 'warn' method is deprecated, "
                "use 'warning' instead", DeprecationWarning, 2)
            self.warning(msg, *args, **kwargs)
    
        def error(self, msg, *args, **kwargs):
            """
            Log 'msg % args' with severity 'ERROR'.
    
            To pass exception information, use the keyword argument exc_info with
            a true value, e.g.
    
            logger.error("Houston, we have a %s", "major problem", exc_info=1)
            """
            if self.isEnabledFor(ERROR):
                self._log(ERROR, msg, args, **kwargs)
    
        def exception(self, msg, *args, exc_info=True, **kwargs):
            """
            Convenience method for logging an ERROR with exception information.
            """
            self.error(msg, *args, exc_info=exc_info, **kwargs)
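
        # Illustrative: called from an except block, exception() picks up the
        # active exception automatically:
        #
        #   try:
        #       1 / 0
        #   except ZeroDivisionError:
        #       logger.exception('division failed')  # traceback is appended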
    
        def critical(self, msg, *args, **kwargs):
            """
            Log 'msg % args' with severity 'CRITICAL'.
    
            To pass exception information, use the keyword argument exc_info with
            a true value, e.g.
    
            logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
            """
            if self.isEnabledFor(CRITICAL):
                self._log(CRITICAL, msg, args, **kwargs)
    
        fatal = critical
    
        def log(self, level, msg, *args, **kwargs):
            """
            Log 'msg % args' with the integer severity 'level'.
    
            To pass exception information, use the keyword argument exc_info with
            a true value, e.g.
    
            logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
            """
            if not isinstance(level, int):
                if raiseExceptions:
                    raise TypeError("level must be an integer")
                else:
                    return
            if self.isEnabledFor(level):
                self._log(level, msg, args, **kwargs)
    
        def findCaller(self, stack_info=False):
            """
            Find the stack frame of the caller so that we can note the source
            file name, line number and function name.
            """
            f = currentframe()
            #On some versions of IronPython, currentframe() returns None if
            #IronPython isn't run with -X:Frames.
            if f is not None:
                f = f.f_back
            rv = "(unknown file)", 0, "(unknown function)", None
            while hasattr(f, "f_code"):
                co = f.f_code
                filename = os.path.normcase(co.co_filename)
                if filename == _srcfile:
                    f = f.f_back
                    continue
                sinfo = None
                if stack_info:
                    sio = io.StringIO()
                    sio.write('Stack (most recent call last):\n')
                    traceback.print_stack(f, file=sio)
                    sinfo = sio.getvalue()
                    if sinfo[-1] == '\n':
                        sinfo = sinfo[:-1]
                    sio.close()
                rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
                break
            return rv
    
        def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
                       func=None, extra=None, sinfo=None):
            """
            A factory method which can be overridden in subclasses to create
            specialized LogRecords.
            """
            rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
                                 sinfo)
            if extra is not None:
                for key in extra:
                    if (key in ["message", "asctime"]) or (key in rv.__dict__):
                        raise KeyError("Attempt to overwrite %r in LogRecord" % key)
                    rv.__dict__[key] = extra[key]
            return rv
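
        # Illustrative: keys in 'extra' become attributes of the record, so a
        # format string can reference them (names here are hypothetical):
        #
        #   handler.setFormatter(Formatter('%(ip)s %(message)s'))
        #   logger.info('login ok', extra={'ip': '10.0.0.1'})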
    
        def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
            """
            Low-level logging routine which creates a LogRecord and then calls
            all the handlers of this logger to handle the record.
            """
            sinfo = None
            if _srcfile:
                #IronPython doesn't track Python frames, so findCaller raises an
                #exception on some versions of IronPython. We trap it here so that
                #IronPython can use logging.
                try:
                    fn, lno, func, sinfo = self.findCaller(stack_info)
                except ValueError: # pragma: no cover
                    fn, lno, func = "(unknown file)", 0, "(unknown function)"
            else: # pragma: no cover
                fn, lno, func = "(unknown file)", 0, "(unknown function)"
            if exc_info:
                if isinstance(exc_info, BaseException):
                    exc_info = (type(exc_info), exc_info, exc_info.__traceback__)
                elif not isinstance(exc_info, tuple):
                    exc_info = sys.exc_info()
            record = self.makeRecord(self.name, level, fn, lno, msg, args,
                                     exc_info, func, extra, sinfo)
            self.handle(record)
    
        def handle(self, record):
            """
            Call the handlers for the specified record.
    
            This method is used for unpickled records received from a socket, as
            well as those created locally. Logger-level filtering is applied.
            """
            if (not self.disabled) and self.filter(record):
                self.callHandlers(record)
    
        def addHandler(self, hdlr):
            """
            Add the specified handler to this logger.
            """
            _acquireLock()
            try:
                if not (hdlr in self.handlers):
                    self.handlers.append(hdlr)
            finally:
                _releaseLock()
    
        def removeHandler(self, hdlr):
            """
            Remove the specified handler from this logger.
            """
            _acquireLock()
            try:
                if hdlr in self.handlers:
                    self.handlers.remove(hdlr)
            finally:
                _releaseLock()
    
        def hasHandlers(self):
            """
            See if this logger has any handlers configured.
    
            Loop through all handlers for this logger and its parents in the
            logger hierarchy. Return True if a handler was found, else False.
            Stop searching up the hierarchy whenever a logger with the "propagate"
            attribute set to zero is found - that will be the last logger which
            is checked for the existence of handlers.
            """
            c = self
            rv = False
            while c:
                if c.handlers:
                    rv = True
                    break
                if not c.propagate:
                    break
                else:
                    c = c.parent
            return rv
    
        def callHandlers(self, record):
            """
            Pass a record to all relevant handlers.
    
            Loop through all handlers for this logger and its parents in the
            logger hierarchy. If no handler was found, output a one-off error
            message to sys.stderr. Stop searching up the hierarchy whenever a
            logger with the "propagate" attribute set to zero is found - that
            will be the last logger whose handlers are called.
            """
            c = self
            found = 0
            while c:
                for hdlr in c.handlers:
                    found = found + 1
                    if record.levelno >= hdlr.level:
                        hdlr.handle(record)
                if not c.propagate:
                    c = None    #break out
                else:
                    c = c.parent
            if (found == 0):
                if lastResort:
                    if record.levelno >= lastResort.level:
                        lastResort.handle(record)
                elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
                    sys.stderr.write("No handlers could be found for logger"
                                     " \"%s\"\n" % self.name)
                    self.manager.emittedNoHandlerWarning = True
    
        def getEffectiveLevel(self):
            """
            Get the effective level for this logger.
    
            Loop through this logger and its parents in the logger hierarchy,
            looking for a non-zero logging level. Return the first one found.
            """
            logger = self
            while logger:
                if logger.level:
                    return logger.level
                logger = logger.parent
            return NOTSET
    
        def isEnabledFor(self, level):
            """
            Is this logger enabled for level 'level'?
            """
            if self.manager.disable >= level:
                return False
            return level >= self.getEffectiveLevel()
    
        def getChild(self, suffix):
            """
            Get a logger which is a descendant to this one.
    
            This is a convenience method, such that
    
            logging.getLogger('abc').getChild('def.ghi')
    
            is the same as
    
            logging.getLogger('abc.def.ghi')
    
            It's useful, for example, when the parent logger is named using
            __name__ rather than a literal string.
            """
            if self.root is not self:
                suffix = '.'.join((self.name, suffix))
            return self.manager.getLogger(suffix)
    
        def __repr__(self):
            level = getLevelName(self.getEffectiveLevel())
            return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level)
    
    
    class RootLogger(Logger):
        """
        A root logger is not that different from any other logger, except that
        it must have a logging level and there is only one instance of it in
        the hierarchy.
        """
        def __init__(self, level):
            """
            Initialize the logger with the name "root".
            """
            Logger.__init__(self, "root", level)
    
    _loggerClass = Logger
    
    class LoggerAdapter(object):
        """
        An adapter for loggers which makes it easier to specify contextual
        information in logging output.
        """
    
        def __init__(self, logger, extra):
            """
            Initialize the adapter with a logger and a dict-like object which
            provides contextual information. This constructor signature allows
            easy stacking of LoggerAdapters, if so desired.
    
            You can effectively pass keyword arguments as shown in the
            following example:
    
            adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
            """
            self.logger = logger
            self.extra = extra
    
        def process(self, msg, kwargs):
            """
            Process the logging message and keyword arguments passed in to
            a logging call to insert contextual information. You can either
            manipulate the message itself, the keyword args or both. Return
            the message and kwargs modified (or not) to suit your needs.
    
            Normally, you'll only need to override this one method in a
            LoggerAdapter subclass for your specific needs.
            """
            kwargs["extra"] = self.extra
            return msg, kwargs
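
        # Illustrative sketch of an override (hypothetical adapter that tags
        # every message with a context string):
        #
        #   class CtxAdapter(LoggerAdapter):
        #       def process(self, msg, kwargs):
        #           kwargs['extra'] = self.extra
        #           return '[%s] %s' % (self.extra['ctx'], msg), kwargs
        #
        #   adapter = CtxAdapter(getLogger(__name__), {'ctx': 'job42'})
        #   adapter.info('started')   # -> "[job42] started"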
    
        #
        # Boilerplate convenience methods
        #
        def debug(self, msg, *args, **kwargs):
            """
            Delegate a debug call to the underlying logger.
            """
            self.log(DEBUG, msg, *args, **kwargs)
    
        def info(self, msg, *args, **kwargs):
            """
            Delegate an info call to the underlying logger.
            """
            self.log(INFO, msg, *args, **kwargs)
    
        def warning(self, msg, *args, **kwargs):
            """
            Delegate a warning call to the underlying logger.
            """
            self.log(WARNING, msg, *args, **kwargs)
    
        def warn(self, msg, *args, **kwargs):
            warnings.warn("The 'warn' method is deprecated, "
                "use 'warning' instead", DeprecationWarning, 2)
            self.warning(msg, *args, **kwargs)
    
        def error(self, msg, *args, **kwargs):
            """
            Delegate an error call to the underlying logger.
            """
            self.log(ERROR, msg, *args, **kwargs)
    
        def exception(self, msg, *args, exc_info=True, **kwargs):
            """
            Delegate an exception call to the underlying logger.
            """
            self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs)
    
        def critical(self, msg, *args, **kwargs):
            """
            Delegate a critical call to the underlying logger.
            """
            self.log(CRITICAL, msg, *args, **kwargs)
    
        def log(self, level, msg, *args, **kwargs):
            """
            Delegate a log call to the underlying logger, after adding
            contextual information from this adapter instance.
            """
            if self.isEnabledFor(level):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
    
        def isEnabledFor(self, level):
            """
            Is this logger enabled for level 'level'?
            """
            if self.logger.manager.disable >= level:
                return False
            return level >= self.getEffectiveLevel()
    
        def setLevel(self, level):
            """
            Set the specified level on the underlying logger.
            """
            self.logger.setLevel(level)
    
        def getEffectiveLevel(self):
            """
            Get the effective level for the underlying logger.
            """
            return self.logger.getEffectiveLevel()
    
        def hasHandlers(self):
            """
            See if the underlying logger has any handlers.
            """
            return self.logger.hasHandlers()
    
        def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
            """
            Low-level log implementation, proxied to allow nested logger adapters.
            """
            return self.logger._log(
                level,
                msg,
                args,
                exc_info=exc_info,
                extra=extra,
                stack_info=stack_info,
            )
    
        @property
        def manager(self):
            return self.logger.manager
    
        @manager.setter
        def manager(self, value):
            self.logger.manager = value
    
        @property
        def name(self):
            return self.logger.name
    
        def __repr__(self):
            logger = self.logger
            level = getLevelName(logger.getEffectiveLevel())
            return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level)
    
    root = RootLogger(WARNING)
    Logger.root = root
    Logger.manager = Manager(Logger.root)
    
    #---------------------------------------------------------------------------
    # Configuration classes and functions
    #---------------------------------------------------------------------------
    
    def basicConfig(**kwargs):
        """
        Do basic configuration for the logging system.
    
        This function does nothing if the root logger already has handlers
        configured. It is a convenience method intended for use by simple scripts
        to do one-shot configuration of the logging package.
    
        The default behaviour is to create a StreamHandler which writes to
        sys.stderr, set a formatter using the BASIC_FORMAT format string, and
        add the handler to the root logger.
    
        A number of optional keyword arguments may be specified, which can alter
        the default behaviour.
    
        filename  Specifies that a FileHandler be created, using the specified
                  filename, rather than a StreamHandler.
        filemode  Specifies the mode to open the file, if filename is specified
                  (if filemode is unspecified, it defaults to 'a').
        format    Use the specified format string for the handler.
        datefmt   Use the specified date/time format.
        style     If a format string is specified, use this to specify the
                  type of format string (possible values '%', '{', '$', for
                  %-formatting, :meth:`str.format` and :class:`string.Template`
                  - defaults to '%').
        level     Set the root logger level to the specified level.
        stream    Use the specified stream to initialize the StreamHandler. Note
                  that this argument is incompatible with 'filename' - if both
                  are present, 'stream' is ignored.
        handlers  If specified, this should be an iterable of already created
                  handlers, which will be added to the root handler. Any handler
                  in the list which does not have a formatter assigned will be
                  assigned the formatter created in this function.
    
        Note that you could specify a stream created using open(filename, mode)
        rather than passing the filename and mode in. However, it should be
        remembered that StreamHandler does not close its stream (since it may be
        using sys.stdout or sys.stderr), whereas FileHandler closes its stream
        when the handler is closed.
    
        .. versionchanged:: 3.2
           Added the ``style`` parameter.
    
        .. versionchanged:: 3.3
           Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
           incompatible arguments (e.g. ``handlers`` specified together with
           ``filename``/``filemode``, or ``filename``/``filemode`` specified
           together with ``stream``, or ``handlers`` specified together with
           ``stream``).
        """
        # Add thread safety in case someone mistakenly calls
        # basicConfig() from multiple threads
        _acquireLock()
        try:
            if len(root.handlers) == 0:
                handlers = kwargs.pop("handlers", None)
                if handlers is None:
                    if "stream" in kwargs and "filename" in kwargs:
                        raise ValueError("'stream' and 'filename' should not be "
                                         "specified together")
                else:
                    if "stream" in kwargs or "filename" in kwargs:
                        raise ValueError("'stream' or 'filename' should not be "
                                         "specified together with 'handlers'")
                if handlers is None:
                    filename = kwargs.pop("filename", None)
                    mode = kwargs.pop("filemode", 'a')
                    if filename:
                        h = FileHandler(filename, mode)
                    else:
                        stream = kwargs.pop("stream", None)
                        h = StreamHandler(stream)
                    handlers = [h]
                dfs = kwargs.pop("datefmt", None)
                style = kwargs.pop("style", '%')
                if style not in _STYLES:
                    raise ValueError('Style must be one of: %s' % ','.join(
                                     _STYLES.keys()))
                fs = kwargs.pop("format", _STYLES[style][1])
                fmt = Formatter(fs, dfs, style)
                for h in handlers:
                    if h.formatter is None:
                        h.setFormatter(fmt)
                    root.addHandler(h)
                level = kwargs.pop("level", None)
                if level is not None:
                    root.setLevel(level)
                if kwargs:
                    keys = ', '.join(kwargs.keys())
                    raise ValueError('Unrecognised argument(s): %s' % keys)
        finally:
            _releaseLock()
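
    # Illustrative one-shot configuration for a simple script (the filename is
    # hypothetical):
    #
    #   basicConfig(filename='app.log', level=DEBUG,
    #               format='%(asctime)s %(levelname)s %(message)s')
    #   info('started')   # written to app.log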
    
    #---------------------------------------------------------------------------
    # Utility functions at module level.
    # Basically delegate everything to the root logger.
    #---------------------------------------------------------------------------
    
    def getLogger(name=None):
        """
        Return a logger with the specified name, creating it if necessary.
    
        If no name is specified, return the root logger.
        """
        if name:
            return Logger.manager.getLogger(name)
        else:
            return root
    
    def critical(msg, *args, **kwargs):
        """
        Log a message with severity 'CRITICAL' on the root logger. If the logger
        has no handlers, call basicConfig() to add a console handler with a
        pre-defined format.
        """
        if len(root.handlers) == 0:
            basicConfig()
        root.critical(msg, *args, **kwargs)
    
    fatal = critical
    
    def error(msg, *args, **kwargs):
        """
        Log a message with severity 'ERROR' on the root logger. If the logger has
        no handlers, call basicConfig() to add a console handler with a pre-defined
        format.
        """
        if len(root.handlers) == 0:
            basicConfig()
        root.error(msg, *args, **kwargs)
    
    def exception(msg, *args, exc_info=True, **kwargs):
        """
        Log a message with severity 'ERROR' on the root logger, with exception
        information. If the logger has no handlers, basicConfig() is called to add
        a console handler with a pre-defined format.
        """
        error(msg, *args, exc_info=exc_info, **kwargs)
    
    def warning(msg, *args, **kwargs):
        """
        Log a message with severity 'WARNING' on the root logger. If the logger has
        no handlers, call basicConfig() to add a console handler with a pre-defined
        format.
        """
        if len(root.handlers) == 0:
            basicConfig()
        root.warning(msg, *args, **kwargs)
    
    def warn(msg, *args, **kwargs):
        warnings.warn("The 'warn' function is deprecated, "
            "use 'warning' instead", DeprecationWarning, 2)
        warning(msg, *args, **kwargs)
    
    def info(msg, *args, **kwargs):
        """
        Log a message with severity 'INFO' on the root logger. If the logger has
        no handlers, call basicConfig() to add a console handler with a pre-defined
        format.
        """
        if len(root.handlers) == 0:
            basicConfig()
        root.info(msg, *args, **kwargs)
    
    def debug(msg, *args, **kwargs):
        """
        Log a message with severity 'DEBUG' on the root logger. If the logger has
        no handlers, call basicConfig() to add a console handler with a pre-defined
        format.
        """
        if len(root.handlers) == 0:
            basicConfig()
        root.debug(msg, *args, **kwargs)
    
    def log(level, msg, *args, **kwargs):
        """
        Log 'msg % args' with the integer severity 'level' on the root logger. If
        the logger has no handlers, call basicConfig() to add a console handler
        with a pre-defined format.
        """
        if len(root.handlers) == 0:
            basicConfig()
        root.log(level, msg, *args, **kwargs)
    
    def disable(level):
        """
        Disable all logging calls of severity 'level' and below.
        """
        root.manager.disable = level
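
    # Illustrative: disable(ERROR) suppresses ERROR and everything below it,
    # so only CRITICAL records get through; disable(NOTSET) undoes this:
    #
    #   disable(ERROR)
    #   error('hidden')      # suppressed
    #   critical('shown')    # still logged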
    
    def shutdown(handlerList=_handlerList):
        """
        Perform any cleanup actions in the logging system (e.g. flushing
        buffers).
    
        Should be called at application exit.
        """
        for wr in reversed(handlerList[:]):
            #errors might occur, for example, if files are locked
            #we just ignore them if raiseExceptions is not set
            try:
                h = wr()
                if h:
                    try:
                        h.acquire()
                        h.flush()
                        h.close()
                    except (OSError, ValueError):
                        # Ignore errors which might be caused
                        # because handlers have been closed but
                        # references to them are still around at
                        # application exit.
                        pass
                    finally:
                        h.release()
            except: # ignore everything, as we're shutting down
                if raiseExceptions:
                    raise
                #else, swallow
    
    # Let's try to shut down automatically on application exit...
    import atexit
    atexit.register(shutdown)
    
    # Null handler
    
    class NullHandler(Handler):
        """
        This handler does nothing. It's intended to be used to avoid the
        "No handlers could be found for logger XXX" one-off warning. This is
        important for library code, which may contain code to log events. If a user
        of the library does not configure logging, the one-off warning might be
        produced; to avoid this, the library developer simply needs to instantiate
        a NullHandler and add it to the top-level logger of the library module or
        package.
        """
        def handle(self, record):
            """Stub."""
    
        def emit(self, record):
            """Stub."""
    
        def createLock(self):
            self.lock = None
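
    # Illustrative library-author idiom ('mylib' is a hypothetical package):
    #
    #   getLogger('mylib').addHandler(NullHandler())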
    
    # Warnings integration
    
    _warnings_showwarning = None
    
    def _showwarning(message, category, filename, lineno, file=None, line=None):
        """
        Implementation of showwarning which redirects to logging. It will first
        check to see if the file parameter is None. If a file is specified, it will
        delegate to the original warnings implementation of showwarning. Otherwise,
        it will call warnings.formatwarning and will log the resulting string to a
        warnings logger named "py.warnings" with level logging.WARNING.
        """
        if file is not None:
            if _warnings_showwarning is not None:
                _warnings_showwarning(message, category, filename, lineno, file, line)
        else:
            s = warnings.formatwarning(message, category, filename, lineno, line)
            logger = getLogger("py.warnings")
            if not logger.handlers:
                logger.addHandler(NullHandler())
            logger.warning("%s", s)
    
    def captureWarnings(capture):
        """
        If capture is true, redirect all warnings to the logging package.
        If capture is False, ensure that warnings are not redirected to logging
        but to their original destinations.
        """
        global _warnings_showwarning
        if capture:
            if _warnings_showwarning is None:
                _warnings_showwarning = warnings.showwarning
                warnings.showwarning = _showwarning
        else:
            if _warnings_showwarning is not None:
                warnings.showwarning = _warnings_showwarning
                _warnings_showwarning = None
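
    # Illustrative: routing warnings.warn() output through logging:
    #
    #   captureWarnings(True)
    #   warnings.warn('deprecated API')  # logged via the 'py.warnings' logger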

    # usr/lib64/python3.11/multiprocessing/__init__.py
    #
    # Package analogous to 'threading.py' but using processes
    #
    # multiprocessing/__init__.py
    #
    # This package is intended to duplicate the functionality (and much of
    # the API) of threading.py but uses processes instead of threads.  A
    # subpackage 'multiprocessing.dummy' has the same API but is a simple
    # wrapper for 'threading'.
    #
    # Copyright (c) 2006-2008, R Oudkerk
    # Licensed to PSF under a Contributor Agreement.
    #
    
    import sys
    from . import context
    
    #
    # Copy stuff from default context
    #
    
    __all__ = [x for x in dir(context._default_context) if not x.startswith('_')]
    globals().update((name, getattr(context._default_context, name)) for name in __all__)
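
    # Illustrative: after the update above, names such as Process, Queue and
    # Pool are module-level attributes delegating to the default context:
    #
    #   from multiprocessing import Process, Queue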
    
    #
    # XXX These should not really be documented or public.
    #
    
    SUBDEBUG = 5
    SUBWARNING = 25
    
    #
    # Alias for main module -- will be reset by bootstrapping child processes
    #
    
    if '__main__' in sys.modules:
        sys.modules['__mp_main__'] = sys.modules['__main__']

    # usr/lib64/python3.11/distutils/__init__.py
    """distutils
    
    The main package for the Python Module Distribution Utilities.  Normally
    used from a setup script as
    
       from distutils.core import setup
    
       setup (...)
    """
    
    import sys
    import warnings
    
    __version__ = sys.version[:sys.version.index(' ')]
    
    _DEPRECATION_MESSAGE = ("The distutils package is deprecated and slated for "
                            "removal in Python 3.12. Use setuptools or check "
                            "PEP 632 for potential alternatives")
    warnings.warn(_DEPRECATION_MESSAGE,
                  DeprecationWarning, 2)

    # usr/lib64/python3.11/ensurepip/__init__.py
    import collections
    import os
    import os.path
    import subprocess
    import sys
    import sysconfig
    import tempfile
    from importlib import resources
    
    
    __all__ = ["version", "bootstrap"]
    _PACKAGE_NAMES = ('setuptools', 'pip')
    _SETUPTOOLS_VERSION = "65.5.0"
    _PIP_VERSION = "24.0"
    _PROJECTS = [
        ("setuptools", _SETUPTOOLS_VERSION, "py3"),
        ("pip", _PIP_VERSION, "py3"),
    ]
    
    # Packages bundled in ensurepip._bundled have wheel_name set.
    # Packages from WHEEL_PKG_DIR have wheel_path set.
    _Package = collections.namedtuple('Package',
                                      ('version', 'wheel_name', 'wheel_path'))
    
    # Directory of system wheel packages. Some Linux distribution packaging
    # policies recommend against bundling dependencies. For example, Fedora
    # installs wheel packages in the /usr/share/python-wheels/ directory and
    # doesn't install the ensurepip._bundled package.
    _WHEEL_PKG_DIR = sysconfig.get_config_var('WHEEL_PKG_DIR')
    
    
    def _find_packages(path):
        packages = {}
        try:
            filenames = os.listdir(path)
        except OSError:
            # Ignore: path doesn't exist or permission error
            filenames = ()
        # Make the code deterministic if a directory contains multiple wheel files
        # of the same package, but don't attempt to implement correct version
        # comparison since this case should not happen.
        filenames = sorted(filenames)
        for filename in filenames:
            # filename is like 'pip-21.2.4-py3-none-any.whl'
            if not filename.endswith(".whl"):
                continue
            for name in _PACKAGE_NAMES:
                prefix = name + '-'
                if filename.startswith(prefix):
                    break
            else:
                continue
    
            # Extract '21.2.4' from 'pip-21.2.4-py3-none-any.whl'
            version = filename.removeprefix(prefix).partition('-')[0]
            wheel_path = os.path.join(path, filename)
            packages[name] = _Package(version, None, wheel_path)
        return packages
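
    # Illustrative: a directory containing 'pip-24.0-py3-none-any.whl' yields
    #
    #   {'pip': _Package('24.0', None, '<path>/pip-24.0-py3-none-any.whl')}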
    
    
    def _get_packages():
        global _PACKAGES, _WHEEL_PKG_DIR
        if _PACKAGES is not None:
            return _PACKAGES
    
        packages = {}
        for name, version, py_tag in _PROJECTS:
            wheel_name = f"{name}-{version}-{py_tag}-none-any.whl"
            packages[name] = _Package(version, wheel_name, None)
        if _WHEEL_PKG_DIR:
            dir_packages = _find_packages(_WHEEL_PKG_DIR)
        # only use the wheel package directory if all packages are found there
            if all(name in dir_packages for name in _PACKAGE_NAMES):
                packages = dir_packages
        _PACKAGES = packages
        return packages
    _PACKAGES = None
    
    
    def _run_pip(args, additional_paths=None):
    # Run the bootstrapping in a subprocess to avoid leaking any state that happens
    # after pip has executed. In particular, this avoids the case where pip holds
    # onto the files in *additional_paths*, preventing us from removing them at the
    # end of the invocation.
        code = f"""
    import runpy
    import sys
    sys.path = {additional_paths or []} + sys.path
    sys.argv[1:] = {args}
    runpy.run_module("pip", run_name="__main__", alter_sys=True)
    """
    
        cmd = [
            sys.executable,
            '-W',
            'ignore::DeprecationWarning',
            '-c',
            code,
        ]
        if sys.flags.isolated:
            # run code in isolated mode if currently running isolated
            cmd.insert(1, '-I')
        return subprocess.run(cmd, check=True).returncode
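
# Hedged illustration of why prepending wheel paths works in the child process:
# a .whl file is a zip archive, so once it is on sys.path the interpreter's
# zipimport machinery can import packages straight out of it. The path below is
# hypothetical.
#
#     import sys
#     sys.path.insert(0, "/tmp/pip-24.0-py3-none-any.whl")
#     import pip  # resolved from inside the wheel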
    
    
    def version():
        """
        Returns a string specifying the bundled version of pip.
        """
        return _get_packages()['pip'].version
    
    
    def _disable_pip_configuration_settings():
        # We deliberately ignore all pip environment variables
        # when invoking pip
        # See http://bugs.python.org/issue19734 for details
        keys_to_remove = [k for k in os.environ if k.startswith("PIP_")]
        for k in keys_to_remove:
            del os.environ[k]
        # We also ignore the settings in the default pip configuration file
        # See http://bugs.python.org/issue20053 for details
        os.environ['PIP_CONFIG_FILE'] = os.devnull
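
# Hedged sketch of the effect: any pre-set PIP_* variable is dropped and the
# config file is redirected, so a child pip process runs with defaults only.
#
#     os.environ["PIP_INDEX_URL"] = "https://example.invalid/simple"  # hypothetical
#     _disable_pip_configuration_settings()
#     assert "PIP_INDEX_URL" not in os.environ
#     assert os.environ["PIP_CONFIG_FILE"] == os.devnull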
    
    
    def bootstrap(*, root=None, upgrade=False, user=False,
                  altinstall=False, default_pip=False,
                  verbosity=0):
        """
        Bootstrap pip into the current Python installation (or the given root
        directory).
    
        Note that calling this function will alter both sys.path and os.environ.
        """
        # Discard the return value
        _bootstrap(root=root, upgrade=upgrade, user=user,
                   altinstall=altinstall, default_pip=default_pip,
                   verbosity=verbosity)
    
    
    def _bootstrap(*, root=None, upgrade=False, user=False,
                  altinstall=False, default_pip=False,
                  verbosity=0):
        """
        Bootstrap pip into the current Python installation (or the given root
        directory). Returns pip command status code.
    
        Note that calling this function will alter both sys.path and os.environ.
        """
        if altinstall and default_pip:
            raise ValueError("Cannot use altinstall and default_pip together")
    
        sys.audit("ensurepip.bootstrap", root)
    
        _disable_pip_configuration_settings()
    
        # By default, installing pip and setuptools installs all of the
        # following scripts (X.Y == running Python version):
        #
        #   pip, pipX, pipX.Y, easy_install, easy_install-X.Y
        #
        # pip 1.5+ allows ensurepip to request that some of those be left out
        if altinstall:
            # omit pip, pipX and easy_install
            os.environ["ENSUREPIP_OPTIONS"] = "altinstall"
        elif not default_pip:
            # omit pip and easy_install
            os.environ["ENSUREPIP_OPTIONS"] = "install"
    
        with tempfile.TemporaryDirectory() as tmpdir:
            # Put our bundled wheels into a temporary directory and construct the
        # additional paths that need to be added to sys.path
            additional_paths = []
            for name, package in _get_packages().items():
                if package.wheel_name:
                    # Use bundled wheel package
                    wheel_name = package.wheel_name
                    wheel_path = resources.files("ensurepip") / "_bundled" / wheel_name
                    whl = wheel_path.read_bytes()
                else:
                    # Use the wheel package directory
                    with open(package.wheel_path, "rb") as fp:
                        whl = fp.read()
                    wheel_name = os.path.basename(package.wheel_path)
    
                filename = os.path.join(tmpdir, wheel_name)
                with open(filename, "wb") as fp:
                    fp.write(whl)
    
                additional_paths.append(filename)
    
            # Construct the arguments to be passed to the pip command
            args = ["install", "--no-cache-dir", "--no-index", "--find-links", tmpdir]
            if root:
                args += ["--root", root]
            if upgrade:
                args += ["--upgrade"]
            if user:
                args += ["--user"]
            if verbosity:
                args += ["-" + "v" * verbosity]
    
            return _run_pip([*args, *_PACKAGE_NAMES], additional_paths)
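
# Hedged usage sketch: calling the public wrapper
#
#     import ensurepip
#     ensurepip.bootstrap(upgrade=True, verbosity=1)
#
# is roughly equivalent to running:  python -m ensurepip --upgrade -v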
    
    def _uninstall_helper(*, verbosity=0):
        """Helper to support a clean default uninstall process on Windows
    
        Note that calling this function may alter os.environ.
        """
        # Nothing to do if pip was never installed, or has been removed
        try:
            import pip
        except ImportError:
            return
    
        # If the installed pip version doesn't match the available one,
        # leave it alone
        available_version = version()
        if pip.__version__ != available_version:
            print(f"ensurepip will only uninstall a matching version "
                  f"({pip.__version__!r} installed, "
                  f"{available_version!r} available)",
                  file=sys.stderr)
            return
    
        _disable_pip_configuration_settings()
    
        # Construct the arguments to be passed to the pip command
        args = ["uninstall", "-y", "--disable-pip-version-check"]
        if verbosity:
            args += ["-" + "v" * verbosity]
    
        return _run_pip([*args, *reversed(_PACKAGE_NAMES)])
    
    
    def _main(argv=None):
        import argparse
        parser = argparse.ArgumentParser(prog="python -m ensurepip")
        parser.add_argument(
            "--version",
            action="version",
            version="pip {}".format(version()),
            help="Show the version of pip that is bundled with this Python.",
        )
        parser.add_argument(
            "-v", "--verbose",
            action="count",
            default=0,
            dest="verbosity",
            help=("Give more output. Option is additive, and can be used up to 3 "
                  "times."),
        )
        parser.add_argument(
            "-U", "--upgrade",
            action="store_true",
            default=False,
            help="Upgrade pip and dependencies, even if already installed.",
        )
        parser.add_argument(
            "--user",
            action="store_true",
            default=False,
            help="Install using the user scheme.",
        )
        parser.add_argument(
            "--root",
            default=None,
            help="Install everything relative to this alternate root directory.",
        )
        parser.add_argument(
            "--altinstall",
            action="store_true",
            default=False,
            help=("Make an alternate install, installing only the X.Y versioned "
                  "scripts (Default: pipX, pipX.Y, easy_install-X.Y)."),
        )
        parser.add_argument(
            "--default-pip",
            action="store_true",
            default=False,
            help=("Make a default pip install, installing the unqualified pip "
                  "and easy_install in addition to the versioned scripts."),
        )
    
        args = parser.parse_args(argv)
    
        return _bootstrap(
            root=args.root,
            upgrade=args.upgrade,
            user=args.user,
            verbosity=args.verbosity,
            altinstall=args.altinstall,
            default_pip=args.default_pip,
        )
# usr/lib64/python2.7/curses/__init__.py
"""curses
    
    The main package for curses support for Python.  Normally used by importing
    the package, and perhaps a particular module inside it.
    
       import curses
       from curses import textpad
       curses.initscr()
       ...
    
    """
    
    __revision__ = "$Id$"
    
    from _curses import *
    from curses.wrapper import wrapper
    import os as _os
    import sys as _sys
    
    # Some constants, most notably the ACS_* ones, are only added to the C
    # _curses module's dictionary after initscr() is called.  (Some
    # versions of SGI's curses don't define values for those constants
    # until initscr() has been called.)  This wrapper function calls the
    # underlying C initscr(), and then copies the constants from the
    # _curses module to the curses package's dictionary.  Don't do 'from
    # curses import *' if you'll be needing the ACS_* constants.
    
    def initscr():
        import _curses, curses
        # we call setupterm() here because it raises an error
        # instead of calling exit() in error cases.
        setupterm(term=_os.environ.get("TERM", "unknown"),
                  fd=_sys.__stdout__.fileno())
        stdscr = _curses.initscr()
        for key, value in _curses.__dict__.items():
            if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
                setattr(curses, key, value)
    
        return stdscr
    
    # This is a similar wrapper for start_color(), which adds the COLORS and
    # COLOR_PAIRS variables which are only available after start_color() is
    # called.
    
    def start_color():
        import _curses, curses
        retval = _curses.start_color()
        if hasattr(_curses, 'COLORS'):
            curses.COLORS = _curses.COLORS
        if hasattr(_curses, 'COLOR_PAIRS'):
            curses.COLOR_PAIRS = _curses.COLOR_PAIRS
        return retval
    
    # Import Python has_key() implementation if _curses doesn't contain has_key()
    
    try:
        has_key
    except NameError:
        from has_key import has_key
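
# Hedged usage sketch (Python 2 syntax, matching this module): the wrapper
# imported above drives a typical curses program and restores the terminal on
# exit.
#
#     import curses
#
#     def main(stdscr):
#         stdscr.addstr(0, 0, "hello")
#         stdscr.getch()
#
#     curses.wrapper(main)
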
# usr/lib64/python3.6/xml/dom/__init__.py
"""W3C Document Object Model implementation for Python.
    
    The Python mapping of the Document Object Model is documented in the
    Python Library Reference in the section on the xml.dom package.
    
    This package contains the following modules:
    
    minidom -- A simple implementation of the Level 1 DOM with namespace
               support added (based on the Level 2 specification) and other
               minor Level 2 functionality.
    
    pulldom -- DOM builder supporting on-demand tree-building for selected
               subtrees of the document.
    
    """
    
    
    class Node:
        """Class giving the NodeType constants."""
        __slots__ = ()
    
        # DOM implementations may use this as a base class for their own
        # Node implementations.  If they don't, the constants defined here
        # should still be used as the canonical definitions as they match
        # the values given in the W3C recommendation.  Client code can
        # safely refer to these values in all tests of Node.nodeType
        # values.
    
        ELEMENT_NODE                = 1
        ATTRIBUTE_NODE              = 2
        TEXT_NODE                   = 3
        CDATA_SECTION_NODE          = 4
        ENTITY_REFERENCE_NODE       = 5
        ENTITY_NODE                 = 6
        PROCESSING_INSTRUCTION_NODE = 7
        COMMENT_NODE                = 8
        DOCUMENT_NODE               = 9
        DOCUMENT_TYPE_NODE          = 10
        DOCUMENT_FRAGMENT_NODE      = 11
        NOTATION_NODE               = 12
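
# Illustrative sketch: client code compares nodeType against the constants
# above, e.g. with the standard-library minidom implementation.
#
#     from xml.dom import minidom, Node
#     doc = minidom.parseString("<root><child/></root>")
#     assert doc.documentElement.nodeType == Node.ELEMENT_NODE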
    
    
    #ExceptionCode
    INDEX_SIZE_ERR                 = 1
    DOMSTRING_SIZE_ERR             = 2
    HIERARCHY_REQUEST_ERR          = 3
    WRONG_DOCUMENT_ERR             = 4
    INVALID_CHARACTER_ERR          = 5
    NO_DATA_ALLOWED_ERR            = 6
    NO_MODIFICATION_ALLOWED_ERR    = 7
    NOT_FOUND_ERR                  = 8
    NOT_SUPPORTED_ERR              = 9
    INUSE_ATTRIBUTE_ERR            = 10
    INVALID_STATE_ERR              = 11
    SYNTAX_ERR                     = 12
    INVALID_MODIFICATION_ERR       = 13
    NAMESPACE_ERR                  = 14
    INVALID_ACCESS_ERR             = 15
    VALIDATION_ERR                 = 16
    
    
    class DOMException(Exception):
        """Abstract base class for DOM exceptions.
        Exceptions with specific codes are specializations of this class."""
    
        def __init__(self, *args, **kw):
            if self.__class__ is DOMException:
                raise RuntimeError(
                    "DOMException should not be instantiated directly")
            Exception.__init__(self, *args, **kw)
    
        def _get_code(self):
            return self.code
    
    
    class IndexSizeErr(DOMException):
        code = INDEX_SIZE_ERR
    
    class DomstringSizeErr(DOMException):
        code = DOMSTRING_SIZE_ERR
    
    class HierarchyRequestErr(DOMException):
        code = HIERARCHY_REQUEST_ERR
    
    class WrongDocumentErr(DOMException):
        code = WRONG_DOCUMENT_ERR
    
    class InvalidCharacterErr(DOMException):
        code = INVALID_CHARACTER_ERR
    
    class NoDataAllowedErr(DOMException):
        code = NO_DATA_ALLOWED_ERR
    
    class NoModificationAllowedErr(DOMException):
        code = NO_MODIFICATION_ALLOWED_ERR
    
    class NotFoundErr(DOMException):
        code = NOT_FOUND_ERR
    
    class NotSupportedErr(DOMException):
        code = NOT_SUPPORTED_ERR
    
    class InuseAttributeErr(DOMException):
        code = INUSE_ATTRIBUTE_ERR
    
    class InvalidStateErr(DOMException):
        code = INVALID_STATE_ERR
    
    class SyntaxErr(DOMException):
        code = SYNTAX_ERR
    
    class InvalidModificationErr(DOMException):
        code = INVALID_MODIFICATION_ERR
    
    class NamespaceErr(DOMException):
        code = NAMESPACE_ERR
    
    class InvalidAccessErr(DOMException):
        code = INVALID_ACCESS_ERR
    
    class ValidationErr(DOMException):
        code = VALIDATION_ERR
    
    class UserDataHandler:
        """Class giving the operation constants for UserDataHandler.handle()."""
    
        # Based on DOM Level 3 (WD 9 April 2002)
    
        NODE_CLONED   = 1
        NODE_IMPORTED = 2
        NODE_DELETED  = 3
        NODE_RENAMED  = 4
    
    XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
    XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
    XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
    EMPTY_NAMESPACE = None
    EMPTY_PREFIX = None
    
    from .domreg import getDOMImplementation, registerDOMImplementation
# usr/lib64/python3.6/site-packages/lxml/includes/__init__.py (empty file)
# usr/lib64/python2.7/distutils/command/__init__.py
"""distutils.command
    
    Package containing implementation of all the standard Distutils
    commands."""
    
    __revision__ = "$Id$"
    
    __all__ = ['build',
               'build_py',
               'build_ext',
               'build_clib',
               'build_scripts',
               'clean',
               'install',
               'install_lib',
               'install_headers',
               'install_scripts',
               'install_data',
               'sdist',
               'register',
               'bdist',
               'bdist_dumb',
               'bdist_rpm',
               'bdist_wininst',
               'upload',
               'check',
               # These two are reserved for future use:
               #'bdist_sdux',
               #'bdist_pkgtool',
               # Note:
               # bdist_packager is not included because it only provides
               # an abstract base class
              ]
# usr/lib64/python3.11/lib2to3/pgen2/__init__.py
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
    # Licensed to PSF under a Contributor Agreement.
    
    """The pgen2 package."""
# usr/lib/python3.6/site-packages/dns/__init__.py
# Copyright (C) 2003-2007, 2009, 2011 Nominum, Inc.
    #
    # Permission to use, copy, modify, and distribute this software and its
    # documentation for any purpose with or without fee is hereby granted,
    # provided that the above copyright notice and this permission notice
    # appear in all copies.
    #
    # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
    # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
    # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
    # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    
    """dnspython DNS toolkit"""
    
    __all__ = [
        'dnssec',
        'e164',
        'edns',
        'entropy',
        'exception',
        'flags',
        'hash',
        'inet',
        'ipv4',
        'ipv6',
        'message',
        'name',
        'namedict',
        'node',
        'opcode',
        'query',
        'rcode',
        'rdata',
        'rdataclass',
        'rdataset',
        'rdatatype',
        'renderer',
        'resolver',
        'reversename',
        'rrset',
        'set',
        'tokenizer',
        'tsig',
        'tsigkeyring',
        'ttl',
        'rdtypes',
        'update',
        'version',
        'wiredata',
        'zone',
    ]
# usr/lib/python2.7/site-packages/pip/__init__.py
from __future__ import absolute_import
    
    import locale
    import logging
    import os
    import optparse
    import warnings
    
    import sys
    import re
    
    # 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks,
    # but if invoked (i.e. imported), it will issue a warning to stderr if socks
    # isn't available.  requests unconditionally imports urllib3's socks contrib
    # module, triggering this warning.  The warning breaks DEP-8 tests (because of
    # the stderr output) and is just plain annoying in normal usage.  I don't want
# to add socks as yet another dependency for pip, nor do I want to allow-stderr
    # in the DEP-8 tests, so just suppress the warning.  pdb tells me this has to
    # be done before the import of pip.vcs.
    from pip._vendor.urllib3.exceptions import DependencyWarning
    warnings.filterwarnings("ignore", category=DependencyWarning)  # noqa
    
    # We want to inject the use of SecureTransport as early as possible so that any
    # references or sessions or what have you are ensured to have it, however we
    # only want to do this in the case that we're running on macOS and the linked
    # OpenSSL is too old to handle TLSv1.2
    try:
        import ssl
    except ImportError:
        pass
    else:
        if (sys.platform == "darwin" and
                getattr(ssl, "OPENSSL_VERSION_NUMBER", 0) < 0x1000100f):  # OpenSSL 1.0.1
            try:
                from pip._vendor.urllib3.contrib import securetransport
            except (ImportError, OSError):
                pass
            else:
                securetransport.inject_into_urllib3()
    
    from pip.exceptions import InstallationError, CommandError, PipError
    from pip.utils import get_installed_distributions, get_prog
    from pip.utils import deprecation, dist_is_editable
    from pip.vcs import git, mercurial, subversion, bazaar  # noqa
    from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
    from pip.commands import get_summaries, get_similar_commands
    from pip.commands import commands_dict
    from pip._vendor.urllib3.exceptions import InsecureRequestWarning
    
    
    # assignment for flake8 to be happy
    
    # This fixes a peculiarity when importing via __import__ - as we are
    # initialising the pip module, "from pip import cmdoptions" is recursive
    # and appears not to work properly in that situation.
    import pip.cmdoptions
    cmdoptions = pip.cmdoptions
    
    # The version as used in the setup.py and the docs conf.py
    __version__ = "9.0.3"
    
    
    logger = logging.getLogger(__name__)
    
    # Hide the InsecureRequestWarning from urllib3
    warnings.filterwarnings("ignore", category=InsecureRequestWarning)
    
    
    def autocomplete():
        """Command and option completion for the main option parser (and options)
        and its subcommands (and options).
    
        Enable by sourcing one of the completion shell scripts (bash, zsh or fish).
        """
        # Don't complete if user hasn't sourced bash_completion file.
        if 'PIP_AUTO_COMPLETE' not in os.environ:
            return
        cwords = os.environ['COMP_WORDS'].split()[1:]
        cword = int(os.environ['COMP_CWORD'])
        try:
            current = cwords[cword - 1]
        except IndexError:
            current = ''
    
        subcommands = [cmd for cmd, summary in get_summaries()]
        options = []
        # subcommand
        try:
            subcommand_name = [w for w in cwords if w in subcommands][0]
        except IndexError:
            subcommand_name = None
    
        parser = create_main_parser()
        # subcommand options
        if subcommand_name:
            # special case: 'help' subcommand has no options
            if subcommand_name == 'help':
                sys.exit(1)
            # special case: list locally installed dists for uninstall command
            if subcommand_name == 'uninstall' and not current.startswith('-'):
                installed = []
                lc = current.lower()
                for dist in get_installed_distributions(local_only=True):
                    if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                        installed.append(dist.key)
                # if there are no dists installed, fall back to option completion
                if installed:
                    for dist in installed:
                        print(dist)
                    sys.exit(1)
    
            subcommand = commands_dict[subcommand_name]()
            options += [(opt.get_opt_string(), opt.nargs)
                        for opt in subcommand.parser.option_list_all
                        if opt.help != optparse.SUPPRESS_HELP]
    
            # filter out previously specified options from available options
            prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
            options = [(x, v) for (x, v) in options if x not in prev_opts]
            # filter options by current input
            options = [(k, v) for k, v in options if k.startswith(current)]
            for option in options:
                opt_label = option[0]
                # append '=' to options which require args
                if option[1]:
                    opt_label += '='
                print(opt_label)
        else:
            # show main parser options only when necessary
            if current.startswith('-') or current.startswith('--'):
                opts = [i.option_list for i in parser.option_groups]
                opts.append(parser.option_list)
                opts = (o for it in opts for o in it)
    
                subcommands += [i.get_opt_string() for i in opts
                                if i.help != optparse.SUPPRESS_HELP]
    
            print(' '.join([x for x in subcommands if x.startswith(current)]))
        sys.exit(1)
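
# Hedged illustration of the completion protocol above: a shell completion
# script invokes pip with the COMP_* variables set, e.g.
#
#     PIP_AUTO_COMPLETE=1 COMP_WORDS="pip inst" COMP_CWORD=1 pip
#
# which prints the matching subcommands (here "install") and exits.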
    
    
    def create_main_parser():
        parser_kw = {
            'usage': '\n%prog  [options]',
            'add_help_option': False,
            'formatter': UpdatingDefaultsHelpFormatter(),
            'name': 'global',
            'prog': get_prog(),
        }
    
        parser = ConfigOptionParser(**parser_kw)
        parser.disable_interspersed_args()
    
        pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        parser.version = 'pip %s from %s (python %s)' % (
            __version__, pip_pkg_dir, sys.version[:3])
    
        # add the general options
        gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
        parser.add_option_group(gen_opts)
    
        parser.main = True  # so the help formatter knows
    
        # create command listing for description
        command_summaries = get_summaries()
        description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
        parser.description = '\n'.join(description)
    
        return parser
    
    
    def parseopts(args):
        parser = create_main_parser()
    
        # Note: parser calls disable_interspersed_args(), so the result of this
        # call is to split the initial args into the general options before the
        # subcommand and everything else.
        # For example:
        #  args: ['--timeout=5', 'install', '--user', 'INITools']
        #  general_options: ['--timeout==5']
        #  args_else: ['install', '--user', 'INITools']
        general_options, args_else = parser.parse_args(args)
    
        # --version
        if general_options.version:
            sys.stdout.write(parser.version)
            sys.stdout.write(os.linesep)
            sys.exit()
    
        # pip || pip help -> print_help()
        if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
            parser.print_help()
            sys.exit()
    
        # the subcommand name
        cmd_name = args_else[0]
    
        if cmd_name not in commands_dict:
            guess = get_similar_commands(cmd_name)
    
            msg = ['unknown command "%s"' % cmd_name]
            if guess:
                msg.append('maybe you meant "%s"' % guess)
    
            raise CommandError(' - '.join(msg))
    
        # all the args without the subcommand
        cmd_args = args[:]
        cmd_args.remove(cmd_name)
    
        return cmd_name, cmd_args
    
    
    def check_isolated(args):
        isolated = False
    
        if "--isolated" in args:
            isolated = True
    
        return isolated
    
    
    def main(args=None):
        if args is None:
            args = sys.argv[1:]
    
        # Configure our deprecation warnings to be sent through loggers
        deprecation.install_warning_logger()
    
        autocomplete()
    
        try:
            cmd_name, cmd_args = parseopts(args)
        except PipError as exc:
            sys.stderr.write("ERROR: %s" % exc)
            sys.stderr.write(os.linesep)
            sys.exit(1)
    
        # Needed for locale.getpreferredencoding(False) to work
        # in pip.utils.encoding.auto_decode
        try:
            locale.setlocale(locale.LC_ALL, '')
        except locale.Error as e:
        # setlocale can apparently crash if locales are uninitialized
            logger.debug("Ignoring error %s when setting locale", e)
        command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
        return command.main(cmd_args)
    
    
    # ###########################################################
    # # Writing freeze files
    
    class FrozenRequirement(object):
    
        def __init__(self, name, req, editable, comments=()):
            self.name = name
            self.req = req
            self.editable = editable
            self.comments = comments
    
        _rev_re = re.compile(r'-r(\d+)$')
        _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
    
        @classmethod
        def from_dist(cls, dist, dependency_links):
            location = os.path.normcase(os.path.abspath(dist.location))
            comments = []
            from pip.vcs import vcs, get_src_requirement
            if dist_is_editable(dist) and vcs.get_backend_name(location):
                editable = True
                try:
                    req = get_src_requirement(dist, location)
                except InstallationError as exc:
                    logger.warning(
                        "Error when trying to get requirement for VCS system %s, "
                        "falling back to uneditable format", exc
                    )
                    req = None
                if req is None:
                    logger.warning(
                        'Could not determine repository location of %s', location
                    )
                    comments.append(
                        '## !! Could not determine repository location'
                    )
                    req = dist.as_requirement()
                    editable = False
            else:
                editable = False
                req = dist.as_requirement()
                specs = req.specs
                assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
                    'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
                    (specs, dist)
                version = specs[0][1]
                ver_match = cls._rev_re.search(version)
                date_match = cls._date_re.search(version)
                if ver_match or date_match:
                    svn_backend = vcs.get_backend('svn')
                    if svn_backend:
                        svn_location = svn_backend().get_location(
                            dist,
                            dependency_links,
                        )
                    if not svn_location:
                        logger.warning(
                            'Warning: cannot find svn location for %s', req)
                        comments.append(
                            '## FIXME: could not find svn URL in dependency_links '
                            'for this package:'
                        )
                    else:
                        comments.append(
                            '# Installing as editable to satisfy requirement %s:' %
                            req
                        )
                        if ver_match:
                            rev = ver_match.group(1)
                        else:
                            rev = '{%s}' % date_match.group(1)
                        editable = True
                        req = '%s@%s#egg=%s' % (
                            svn_location,
                            rev,
                            cls.egg_name(dist)
                        )
            return cls(dist.project_name, req, editable, comments)
    
        @staticmethod
        def egg_name(dist):
            name = dist.egg_name()
            match = re.search(r'-py\d\.\d$', name)
            if match:
                name = name[:match.start()]
            return name
    
        def __str__(self):
            req = self.req
            if self.editable:
                req = '-e %s' % req
            return '\n'.join(list(self.comments) + [str(req)]) + '\n'
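
# Hedged sketch of the rendered freeze lines: a pinned distribution renders as
# "Example==1.0", while the editable SVN case built in from_dist() renders as
# something like (hypothetical URL and revision):
#
#     -e https://svn.example.invalid/repo@1234#egg=Example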
    
    
    if __name__ == '__main__':
        sys.exit(main())
# usr/lib/python3.6/site-packages/iotop/__init__.py (empty file)
# usr/lib64/python3.11/importlib/metadata/__init__.py
import os
    import re
    import abc
    import csv
    import sys
    import email
    import pathlib
    import zipfile
    import operator
    import textwrap
    import warnings
    import functools
    import itertools
    import posixpath
    import collections
    
    from . import _adapters, _meta
    from ._collections import FreezableDefaultDict, Pair
    from ._functools import method_cache, pass_none
    from ._itertools import always_iterable, unique_everseen
    from ._meta import PackageMetadata, SimplePath
    
    from contextlib import suppress
    from importlib import import_module
    from importlib.abc import MetaPathFinder
    from itertools import starmap
    from typing import List, Mapping, Optional, Union
    
    
    __all__ = [
        'Distribution',
        'DistributionFinder',
        'PackageMetadata',
        'PackageNotFoundError',
        'distribution',
        'distributions',
        'entry_points',
        'files',
        'metadata',
        'packages_distributions',
        'requires',
        'version',
    ]
    
    
    class PackageNotFoundError(ModuleNotFoundError):
        """The package was not found."""
    
        def __str__(self):
            return f"No package metadata was found for {self.name}"
    
        @property
        def name(self):
            (name,) = self.args
            return name
    
    
    class Sectioned:
        """
        A simple entry point config parser for performance
    
        >>> for item in Sectioned.read(Sectioned._sample):
        ...     print(item)
        Pair(name='sec1', value='# comments ignored')
        Pair(name='sec1', value='a = 1')
        Pair(name='sec1', value='b = 2')
        Pair(name='sec2', value='a = 2')
    
        >>> res = Sectioned.section_pairs(Sectioned._sample)
        >>> item = next(res)
        >>> item.name
        'sec1'
        >>> item.value
        Pair(name='a', value='1')
        >>> item = next(res)
        >>> item.value
        Pair(name='b', value='2')
        >>> item = next(res)
        >>> item.name
        'sec2'
        >>> item.value
        Pair(name='a', value='2')
        >>> list(res)
        []
        """
    
        _sample = textwrap.dedent(
            """
            [sec1]
            # comments ignored
            a = 1
            b = 2
    
            [sec2]
            a = 2
            """
        ).lstrip()
    
        @classmethod
        def section_pairs(cls, text):
            return (
                section._replace(value=Pair.parse(section.value))
                for section in cls.read(text, filter_=cls.valid)
                if section.name is not None
            )
    
        @staticmethod
        def read(text, filter_=None):
            lines = filter(filter_, map(str.strip, text.splitlines()))
            name = None
            for value in lines:
                section_match = value.startswith('[') and value.endswith(']')
                if section_match:
                    name = value.strip('[]')
                    continue
                yield Pair(name, value)
    
        @staticmethod
        def valid(line):
            return line and not line.startswith('#')
    
    
    class DeprecatedTuple:
        """
        Provide subscript item access for backward compatibility.
    
        >>> recwarn = getfixture('recwarn')
        >>> ep = EntryPoint(name='name', value='value', group='group')
        >>> ep[:]
        ('name', 'value', 'group')
        >>> ep[0]
        'name'
        >>> len(recwarn)
        1
        """
    
        _warn = functools.partial(
            warnings.warn,
            "EntryPoint tuple interface is deprecated. Access members by name.",
            DeprecationWarning,
            stacklevel=2,
        )
    
        def __getitem__(self, item):
            self._warn()
            return self._key()[item]
    
    
    class EntryPoint(DeprecatedTuple):
        """An entry point as defined by Python packaging conventions.
    
        See `the packaging docs on entry points
    <https://packaging.python.org/specifications/entry-points/>`_
        for more information.
    
        >>> ep = EntryPoint(
        ...     name=None, group=None, value='package.module:attr [extra1, extra2]')
        >>> ep.module
        'package.module'
        >>> ep.attr
        'attr'
        >>> ep.extras
        ['extra1', 'extra2']
        """
    
        pattern = re.compile(
        r'(?P<module>[\w.]+)\s*'
        r'(:\s*(?P<attr>[\w.]+)\s*)?'
        r'((?P<extras>\[.*\])\s*)?$'
        )
        """
        A regular expression describing the syntax for an entry point,
        which might look like:
    
            - module
            - package.module
            - package.module:attribute
            - package.module:object.attribute
            - package.module:attr [extra1, extra2]
    
        Other combinations are possible as well.
    
        The expression is lenient about whitespace around the ':',
        following the attr, and following any extras.
        """
    
        name: str
        value: str
        group: str
    
        dist: Optional['Distribution'] = None
    
        def __init__(self, name, value, group):
            vars(self).update(name=name, value=value, group=group)
    
        def load(self):
            """Load the entry point from its definition. If only a module
            is indicated by the value, return that module. Otherwise,
            return the named object.
            """
            match = self.pattern.match(self.value)
            module = import_module(match.group('module'))
            attrs = filter(None, (match.group('attr') or '').split('.'))
            return functools.reduce(getattr, attrs, module)
    
        @property
        def module(self):
            match = self.pattern.match(self.value)
            return match.group('module')
    
        @property
        def attr(self):
            match = self.pattern.match(self.value)
            return match.group('attr')
    
        @property
        def extras(self):
            match = self.pattern.match(self.value)
            return re.findall(r'\w+', match.group('extras') or '')
    
        def _for(self, dist):
            vars(self).update(dist=dist)
            return self
    
        def __iter__(self):
            """
            Supply iter so one may construct dicts of EntryPoints by name.
            """
            msg = (
                "Construction of dict of EntryPoints is deprecated in "
                "favor of EntryPoints."
            )
            warnings.warn(msg, DeprecationWarning)
            return iter((self.name, self))
    
        def matches(self, **params):
            """
            EntryPoint matches the given parameters.
    
            >>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]')
            >>> ep.matches(group='foo')
            True
            >>> ep.matches(name='bar', value='bing:bong [extra1, extra2]')
            True
            >>> ep.matches(group='foo', name='other')
            False
            >>> ep.matches()
            True
            >>> ep.matches(extras=['extra1', 'extra2'])
            True
            >>> ep.matches(module='bing')
            True
            >>> ep.matches(attr='bong')
            True
            """
            attrs = (getattr(self, param) for param in params)
            return all(map(operator.eq, params.values(), attrs))
    
        def _key(self):
            return self.name, self.value, self.group
    
        def __lt__(self, other):
            return self._key() < other._key()
    
        def __eq__(self, other):
            return self._key() == other._key()
    
        def __setattr__(self, name, value):
            raise AttributeError("EntryPoint objects are immutable.")
    
        def __repr__(self):
            return (
                f'EntryPoint(name={self.name!r}, value={self.value!r}, '
                f'group={self.group!r})'
            )
    
        def __hash__(self):
            return hash(self._key())
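
# Hedged usage sketch for EntryPoint.load(): the value syntax documented above
# resolves to a module plus an optional dotted attribute path.
#
#     ep = EntryPoint(name='dump', value='json:dumps', group='demo')
#     func = ep.load()  # func is json.dumps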
    
    
    class DeprecatedList(list):
        """
        Allow an otherwise immutable object to implement mutability
        for compatibility.
    
        >>> recwarn = getfixture('recwarn')
        >>> dl = DeprecatedList(range(3))
        >>> dl[0] = 1
        >>> dl.append(3)
        >>> del dl[3]
        >>> dl.reverse()
        >>> dl.sort()
        >>> dl.extend([4])
        >>> dl.pop(-1)
        4
        >>> dl.remove(1)
        >>> dl += [5]
        >>> dl + [6]
        [1, 2, 5, 6]
        >>> dl + (6,)
        [1, 2, 5, 6]
        >>> dl.insert(0, 0)
        >>> dl
        [0, 1, 2, 5]
        >>> dl == [0, 1, 2, 5]
        True
        >>> dl == (0, 1, 2, 5)
        True
        >>> len(recwarn)
        1
        """
    
        __slots__ = ()
    
        _warn = functools.partial(
            warnings.warn,
            "EntryPoints list interface is deprecated. Cast to list if needed.",
            DeprecationWarning,
            stacklevel=2,
        )
    
        def _wrap_deprecated_method(method_name: str):  # type: ignore
            def wrapped(self, *args, **kwargs):
                self._warn()
                return getattr(super(), method_name)(*args, **kwargs)
    
            return method_name, wrapped
    
        locals().update(
            map(
                _wrap_deprecated_method,
                '__setitem__ __delitem__ append reverse extend pop remove '
                '__iadd__ insert sort'.split(),
            )
        )
    
        def __add__(self, other):
            if not isinstance(other, tuple):
                self._warn()
                other = tuple(other)
            return self.__class__(tuple(self) + other)
    
        def __eq__(self, other):
            if not isinstance(other, tuple):
                self._warn()
                other = tuple(other)
    
            return tuple(self).__eq__(other)
    
    
    class EntryPoints(DeprecatedList):
        """
        An immutable collection of selectable EntryPoint objects.
        """
    
        __slots__ = ()
    
        def __getitem__(self, name):  # -> EntryPoint:
            """
            Get the EntryPoint in self matching name.
            """
            if isinstance(name, int):
                warnings.warn(
                    "Accessing entry points by index is deprecated. "
                    "Cast to tuple if needed.",
                    DeprecationWarning,
                    stacklevel=2,
                )
                return super().__getitem__(name)
            try:
                return next(iter(self.select(name=name)))
            except StopIteration:
                raise KeyError(name)
    
        def select(self, **params):
            """
            Select entry points from self that match the
            given parameters (typically group and/or name).
            """
            return EntryPoints(ep for ep in self if ep.matches(**params))
    
        @property
        def names(self):
            """
            Return the set of all names of all entry points.
            """
            return {ep.name for ep in self}
    
        @property
        def groups(self):
            """
            Return the set of all groups of all entry points.
    
            For coverage while SelectableGroups is present.
            >>> EntryPoints().groups
            set()
            """
            return {ep.group for ep in self}
    
        @classmethod
        def _from_text_for(cls, text, dist):
            return cls(ep._for(dist) for ep in cls._from_text(text))
    
        @staticmethod
        def _from_text(text):
            return (
                EntryPoint(name=item.value.name, value=item.value.value, group=item.name)
                for item in Sectioned.section_pairs(text or '')
            )
    
    
    class Deprecated:
        """
        Compatibility add-in for mapping to indicate that
        mapping behavior is deprecated.
    
        >>> recwarn = getfixture('recwarn')
        >>> class DeprecatedDict(Deprecated, dict): pass
        >>> dd = DeprecatedDict(foo='bar')
        >>> dd.get('baz', None)
        >>> dd['foo']
        'bar'
        >>> list(dd)
        ['foo']
        >>> list(dd.keys())
        ['foo']
        >>> 'foo' in dd
        True
        >>> list(dd.values())
        ['bar']
        >>> len(recwarn)
        1
        """
    
        _warn = functools.partial(
            warnings.warn,
            "SelectableGroups dict interface is deprecated. Use select.",
            DeprecationWarning,
            stacklevel=2,
        )
    
        def __getitem__(self, name):
            self._warn()
            return super().__getitem__(name)
    
        def get(self, name, default=None):
            self._warn()
            return super().get(name, default)
    
        def __iter__(self):
            self._warn()
            return super().__iter__()
    
        def __contains__(self, *args):
            self._warn()
            return super().__contains__(*args)
    
        def keys(self):
            self._warn()
            return super().keys()
    
        def values(self):
            self._warn()
            return super().values()
    
    
    class SelectableGroups(Deprecated, dict):
        """
        A backward- and forward-compatible result from
        entry_points that fully implements the dict interface.
        """
    
        @classmethod
        def load(cls, eps):
            by_group = operator.attrgetter('group')
            ordered = sorted(eps, key=by_group)
            grouped = itertools.groupby(ordered, by_group)
            return cls((group, EntryPoints(eps)) for group, eps in grouped)
    
        @property
        def _all(self):
            """
            Reconstruct a list of all entrypoints from the groups.
            """
            groups = super(Deprecated, self).values()
            return EntryPoints(itertools.chain.from_iterable(groups))
    
        @property
        def groups(self):
            return self._all.groups
    
        @property
        def names(self):
            """
            for coverage:
            >>> SelectableGroups().names
            set()
            """
            return self._all.names
    
        def select(self, **params):
            if not params:
                return self
            return self._all.select(**params)
    
    
    class PackagePath(pathlib.PurePosixPath):
        """A reference to a path in a package"""
    
        def read_text(self, encoding='utf-8'):
            with self.locate().open(encoding=encoding) as stream:
                return stream.read()
    
        def read_binary(self):
            with self.locate().open('rb') as stream:
                return stream.read()
    
        def locate(self):
            """Return a path-like object for this path"""
            return self.dist.locate_file(self)
    
    
    class FileHash:
        def __init__(self, spec):
            self.mode, _, self.value = spec.partition('=')
    
        def __repr__(self):
        return f'<FileHash mode: {self.mode} value: {self.value}>'
    
    
    class Distribution:
        """A Python distribution package."""
    
        @abc.abstractmethod
        def read_text(self, filename):
            """Attempt to load metadata file given by the name.
    
            :param filename: The name of the file in the distribution info.
            :return: The text if found, otherwise None.
            """
    
        @abc.abstractmethod
        def locate_file(self, path):
            """
            Given a path to a file in this distribution, return a path
            to it.
            """
    
        @classmethod
        def from_name(cls, name: str):
            """Return the Distribution for the given package name.
    
            :param name: The name of the distribution package to search for.
            :return: The Distribution instance (or subclass thereof) for the named
                package, if found.
            :raises PackageNotFoundError: When the named package's distribution
                metadata cannot be found.
            :raises ValueError: When an invalid value is supplied for name.
            """
            if not name:
                raise ValueError("A distribution name is required.")
            try:
                return next(cls.discover(name=name))
            except StopIteration:
                raise PackageNotFoundError(name)
    
        @classmethod
        def discover(cls, **kwargs):
            """Return an iterable of Distribution objects for all packages.
    
            Pass a ``context`` or pass keyword arguments for constructing
            a context.
    
            :context: A ``DistributionFinder.Context`` object.
            :return: Iterable of Distribution objects for all packages.
            """
            context = kwargs.pop('context', None)
            if context and kwargs:
                raise ValueError("cannot accept context and kwargs")
            context = context or DistributionFinder.Context(**kwargs)
            return itertools.chain.from_iterable(
                resolver(context) for resolver in cls._discover_resolvers()
            )
    
        @staticmethod
        def at(path):
            """Return a Distribution for the indicated metadata path
    
            :param path: a string or path-like object
            :return: a concrete Distribution instance for the path
            """
            return PathDistribution(pathlib.Path(path))
    
        @staticmethod
        def _discover_resolvers():
            """Search the meta_path for resolvers."""
            declared = (
                getattr(finder, 'find_distributions', None) for finder in sys.meta_path
            )
            return filter(None, declared)
    
        @property
        def metadata(self) -> _meta.PackageMetadata:
            """Return the parsed metadata for this Distribution.
    
            The returned object will have keys that name the various bits of
            metadata.  See PEP 566 for details.
            """
            text = (
                self.read_text('METADATA')
                or self.read_text('PKG-INFO')
                # This last clause is here to support old egg-info files.  Its
                # effect is to just end up using the PathDistribution's self._path
                # (which points to the egg-info file) attribute unchanged.
                or self.read_text('')
            )
            return _adapters.Message(email.message_from_string(text))
    
        @property
        def name(self):
            """Return the 'Name' metadata for the distribution package."""
            return self.metadata['Name']
    
        @property
        def _normalized_name(self):
            """Return a normalized version of the name."""
            return Prepared.normalize(self.name)
    
        @property
        def version(self):
            """Return the 'Version' metadata for the distribution package."""
            return self.metadata['Version']
    
        @property
        def entry_points(self):
            return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
    
        @property
        def files(self):
            """Files in this distribution.
    
            :return: List of PackagePath for this distribution or None
    
            Result is `None` if the metadata file that enumerates files
            (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
            missing.
            Result may be empty if the metadata exists but is empty.
            """
    
            def make_file(name, hash=None, size_str=None):
                result = PackagePath(name)
                result.hash = FileHash(hash) if hash else None
                result.size = int(size_str) if size_str else None
                result.dist = self
                return result
    
            @pass_none
            def make_files(lines):
                return list(starmap(make_file, csv.reader(lines)))
    
            return make_files(self._read_files_distinfo() or self._read_files_egginfo())
    
        def _read_files_distinfo(self):
            """
            Read the lines of RECORD
            """
            text = self.read_text('RECORD')
            return text and text.splitlines()
    
        def _read_files_egginfo(self):
            """
            SOURCES.txt might contain literal commas, so wrap each line
            in quotes.
            """
            text = self.read_text('SOURCES.txt')
            return text and map('"{}"'.format, text.splitlines())
    
        @property
        def requires(self):
            """Generated requirements specified for this Distribution"""
            reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
            return reqs and list(reqs)
    
        def _read_dist_info_reqs(self):
            return self.metadata.get_all('Requires-Dist')
    
        def _read_egg_info_reqs(self):
            source = self.read_text('requires.txt')
            return pass_none(self._deps_from_requires_text)(source)
    
        @classmethod
        def _deps_from_requires_text(cls, source):
            return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
    
        @staticmethod
        def _convert_egg_info_reqs_to_simple_reqs(sections):
            """
            Historically, setuptools would solicit and store 'extra'
            requirements, including those with environment markers,
            in separate sections. More modern tools expect each
            dependency to be defined separately, with any relevant
            extras and environment markers attached directly to that
            requirement. This method converts the former to the
            latter. See _test_deps_from_requires_text for an example.
            """
    
            def make_condition(name):
                return name and f'extra == "{name}"'
    
            def quoted_marker(section):
                section = section or ''
                extra, sep, markers = section.partition(':')
                if extra and markers:
                    markers = f'({markers})'
                conditions = list(filter(None, [markers, make_condition(extra)]))
                return '; ' + ' and '.join(conditions) if conditions else ''
    
            def url_req_space(req):
                """
                PEP 508 requires a space between the url_spec and the quoted_marker.
                Ref python/importlib_metadata#357.
                """
                # '@' is uniquely indicative of a url_req.
                return ' ' * ('@' in req)
    
            for section in sections:
                space = url_req_space(section.value)
                yield section.value + space + quoted_marker(section.name)
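
# Hedged sketch of the conversion above: an egg-info requires.txt section such as
#
#     [test:python_version < "3"]
#     mock
#
# yields the single PEP 508-style requirement
#
#     mock; (python_version < "3") and extra == "test"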
    
    
    class DistributionFinder(MetaPathFinder):
        """
        A MetaPathFinder capable of discovering installed distributions.
        """
    
        class Context:
            """
            Keyword arguments presented by the caller to
            ``distributions()`` or ``Distribution.discover()``
            to narrow the scope of a search for distributions
            in all DistributionFinders.
    
            Each DistributionFinder may expect any parameters
            and should attempt to honor the canonical
            parameters defined below when appropriate.
            """
    
            name = None
            """
            Specific name for which a distribution finder should match.
            A name of ``None`` matches all distributions.
            """
    
            def __init__(self, **kwargs):
                vars(self).update(kwargs)
    
            @property
            def path(self):
                """
                The sequence of directory path that a distribution finder
                should search.
    
                Typically refers to Python installed package paths such as
                "site-packages" directories and defaults to ``sys.path``.
                """
                return vars(self).get('path', sys.path)
    
        @abc.abstractmethod
        def find_distributions(self, context=Context()):
            """
            Find distributions.
    
            Return an iterable of all Distribution instances capable of
            loading the metadata for packages matching the ``context``,
            a DistributionFinder.Context instance.
            """
    
    
    class FastPath:
        """
        Micro-optimized class for searching a path for
        children.
    
        >>> FastPath('').children()
        ['...']
        """
    
        @functools.lru_cache()  # type: ignore
        def __new__(cls, root):
            return super().__new__(cls)
    
        def __init__(self, root):
            self.root = root
    
        def joinpath(self, child):
            return pathlib.Path(self.root, child)
    
        def children(self):
            with suppress(Exception):
                return os.listdir(self.root or '.')
            with suppress(Exception):
                return self.zip_children()
            return []
    
        def zip_children(self):
            zip_path = zipfile.Path(self.root)
            names = zip_path.root.namelist()
            self.joinpath = zip_path.joinpath
    
            return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
    
        def search(self, name):
            return self.lookup(self.mtime).search(name)
    
        @property
        def mtime(self):
            with suppress(OSError):
                return os.stat(self.root).st_mtime
            self.lookup.cache_clear()
    
        @method_cache
        def lookup(self, mtime):
            return Lookup(self)
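
    # Cache behavior sketch (illustrative): lookup() is memoized per mtime,
    # so the directory is rescanned only when its st_mtime changes, and a
    # failed os.stat() clears the cache to force a fresh Lookup.
    #
    #     >>> fp = FastPath('.')            # any path; result is cached
    #     >>> fp.lookup(fp.mtime) is fp.lookup(fp.mtime)
    #     True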
    
    
    class Lookup:
        def __init__(self, path: FastPath):
            base = os.path.basename(path.root).lower()
            base_is_egg = base.endswith(".egg")
            self.infos = FreezableDefaultDict(list)
            self.eggs = FreezableDefaultDict(list)
    
            for child in path.children():
                low = child.lower()
                if low.endswith((".dist-info", ".egg-info")):
                    # rpartition is faster than splitext and suitable for this purpose.
                    name = low.rpartition(".")[0].partition("-")[0]
                    normalized = Prepared.normalize(name)
                    self.infos[normalized].append(path.joinpath(child))
                elif base_is_egg and low == "egg-info":
                    name = base.rpartition(".")[0].partition("-")[0]
                    legacy_normalized = Prepared.legacy_normalize(name)
                    self.eggs[legacy_normalized].append(path.joinpath(child))
    
            self.infos.freeze()
            self.eggs.freeze()
    
        def search(self, prepared):
            infos = (
                self.infos[prepared.normalized]
                if prepared
                else itertools.chain.from_iterable(self.infos.values())
            )
            eggs = (
                self.eggs[prepared.legacy_normalized]
                if prepared
                else itertools.chain.from_iterable(self.eggs.values())
            )
            return itertools.chain(infos, eggs)
    
    
    class Prepared:
        """
        A prepared search for metadata on a possibly-named package.
        """
    
        normalized = None
        legacy_normalized = None
    
        def __init__(self, name):
            self.name = name
            if name is None:
                return
            self.normalized = self.normalize(name)
            self.legacy_normalized = self.legacy_normalize(name)
    
        @staticmethod
        def normalize(name):
            """
            PEP 503 normalization plus dashes as underscores.
            """
            return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
    
        @staticmethod
        def legacy_normalize(name):
            """
            Normalize the package name as found in the convention in
            older packaging tools versions and specs.
            """
            return name.lower().replace('-', '_')
    
        def __bool__(self):
            return bool(self.name)
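
    # Normalization examples (illustrative, not from the original source):
    #
    #     >>> Prepared.normalize('Cherry.Py-Dist')
    #     'cherry_py_dist'
    #     >>> Prepared.legacy_normalize('Cherry.Py-Dist')
    #     'cherry.py_dist'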
    
    
    class MetadataPathFinder(DistributionFinder):
        @classmethod
        def find_distributions(cls, context=DistributionFinder.Context()):
            """
            Find distributions.
    
            Return an iterable of all Distribution instances capable of
            loading the metadata for packages matching ``context.name``
            (or all names if ``None`` indicated) along the paths in the list
            of directories ``context.path``.
            """
            found = cls._search_paths(context.name, context.path)
            return map(PathDistribution, found)
    
        @classmethod
        def _search_paths(cls, name, paths):
            """Find metadata directories in paths heuristically."""
            prepared = Prepared(name)
            return itertools.chain.from_iterable(
                path.search(prepared) for path in map(FastPath, paths)
            )
    
        @classmethod
        def invalidate_caches(cls):
            FastPath.__new__.cache_clear()
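
    # Usage sketch (illustrative): distributions() drives this finder, but
    # it can be invoked directly; 'pip' is only an example name.
    #
    #     >>> ctx = DistributionFinder.Context(name='pip')
    #     >>> dists = list(MetadataPathFinder.find_distributions(ctx))
    #     >>> # each item is a PathDistribution wrapping a *.dist-info dir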
    
    
    class PathDistribution(Distribution):
        def __init__(self, path: SimplePath):
            """Construct a distribution.
    
            :param path: SimplePath indicating the metadata directory.
            """
            self._path = path
    
        def read_text(self, filename):
            with suppress(
                FileNotFoundError,
                IsADirectoryError,
                KeyError,
                NotADirectoryError,
                PermissionError,
            ):
                return self._path.joinpath(filename).read_text(encoding='utf-8')
    
        read_text.__doc__ = Distribution.read_text.__doc__
    
        def locate_file(self, path):
            return self._path.parent / path
    
        @property
        def _normalized_name(self):
            """
            Performance optimization: where possible, resolve the
            normalized name from the file system path.
            """
            stem = os.path.basename(str(self._path))
            return (
                pass_none(Prepared.normalize)(self._name_from_stem(stem))
                or super()._normalized_name
            )
    
        @staticmethod
        def _name_from_stem(stem):
            """
            >>> PathDistribution._name_from_stem('foo-3.0.egg-info')
            'foo'
            >>> PathDistribution._name_from_stem('CherryPy-3.0.dist-info')
            'CherryPy'
            >>> PathDistribution._name_from_stem('face.egg-info')
            'face'
            >>> PathDistribution._name_from_stem('foo.bar')
            """
            filename, ext = os.path.splitext(stem)
            if ext not in ('.dist-info', '.egg-info'):
                return
            name, sep, rest = filename.partition('-')
            return name
    
    
    def distribution(distribution_name):
        """Get the ``Distribution`` instance for the named package.
    
        :param distribution_name: The name of the distribution package as a string.
        :return: A ``Distribution`` instance (or subclass thereof).
        """
        return Distribution.from_name(distribution_name)
    
    
    def distributions(**kwargs):
        """Get all ``Distribution`` instances in the current environment.
    
        :return: An iterable of ``Distribution`` instances.
        """
        return Distribution.discover(**kwargs)
    
    
    def metadata(distribution_name) -> _meta.PackageMetadata:
        """Get the metadata for the named package.
    
        :param distribution_name: The name of the distribution package to query.
        :return: A PackageMetadata containing the parsed metadata.
        """
        return Distribution.from_name(distribution_name).metadata
    
    
    def version(distribution_name):
        """Get the version string for the named package.
    
        :param distribution_name: The name of the distribution package to query.
        :return: The version string for the package as defined in the package's
            "Version" metadata key.
        """
        return distribution(distribution_name).version
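
    # Illustrative queries (results depend on what is installed; 'pip' is
    # just an example distribution name):
    #
    #     >>> version('pip')                  # doctest: +SKIP
    #     '21.2.3'
    #     >>> metadata('pip')['Name']         # doctest: +SKIP
    #     'pip'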
    
    
    _unique = functools.partial(
        unique_everseen,
        key=operator.attrgetter('_normalized_name'),
    )
    """
    Wrapper for ``distributions`` to return unique distributions by name.
    """
    
    
    def entry_points(**params) -> Union[EntryPoints, SelectableGroups]:
        """Return EntryPoint objects for all installed packages.
    
        Pass selection parameters (group or name) to filter the
        result to entry points matching those properties (see
        EntryPoints.select()).
    
        For compatibility, returns ``SelectableGroups`` object unless
        selection parameters are supplied. In the future, this function
        will return ``EntryPoints`` instead of ``SelectableGroups``
        even when no selection parameters are supplied.
    
        For maximum future compatibility, pass selection parameters
        or invoke ``.select`` with parameters on the result.
    
        :return: EntryPoints or SelectableGroups for all installed packages.
        """
        eps = itertools.chain.from_iterable(
            dist.entry_points for dist in _unique(distributions())
        )
        return SelectableGroups.load(eps).select(**params)
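
    # Selection sketch (illustrative): passing parameters keeps the result
    # on the forward-compatible EntryPoints path, e.g.
    #
    #     scripts = entry_points(group='console_scripts')
    #     names = [ep.name for ep in scripts]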
    
    
    def files(distribution_name):
        """Return a list of files for the named package.
    
        :param distribution_name: The name of the distribution package to query.
        :return: List of files composing the distribution.
        """
        return distribution(distribution_name).files
    
    
    def requires(distribution_name):
        """
        Return a list of requirements for the named package.
    
        :return: An iterator of requirements, suitable for
            packaging.requirement.Requirement.
        """
        return distribution(distribution_name).requires
    
    
    def packages_distributions() -> Mapping[str, List[str]]:
        """
        Return a mapping of top-level packages to their
        distributions.
    
        >>> import collections.abc
        >>> pkgs = packages_distributions()
        >>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
        True
        """
        pkg_to_dist = collections.defaultdict(list)
        for dist in distributions():
            for pkg in _top_level_declared(dist) or _top_level_inferred(dist):
                pkg_to_dist[pkg].append(dist.metadata['Name'])
        return dict(pkg_to_dist)
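
    # Illustrative lookup (values depend on the environment): a
    # distribution named 'PyYAML' shipping the top-level package 'yaml'
    # would appear as:
    #
    #     >>> packages_distributions().get('yaml')    # doctest: +SKIP
    #     ['PyYAML']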
    
    
    def _top_level_declared(dist):
        return (dist.read_text('top_level.txt') or '').split()
    
    
    def _top_level_inferred(dist):
        return {
            f.parts[0] if len(f.parts) > 1 else f.with_suffix('').name
            for f in always_iterable(dist.files)
            if f.suffix == ".py"
        }
    usr/lib64/python3.6/site-packages/yaml/__init__.py
    from .error import *
    
    from .tokens import *
    from .events import *
    from .nodes import *
    
    from .loader import *
    from .dumper import *
    
    __version__ = '3.12'
    try:
        from .cyaml import *
        __with_libyaml__ = True
    except ImportError:
        __with_libyaml__ = False
    
    import io
    
    def scan(stream, Loader=Loader):
        """
        Scan a YAML stream and produce scanning tokens.
        """
        loader = Loader(stream)
        try:
            while loader.check_token():
                yield loader.get_token()
        finally:
            loader.dispose()
    
    def parse(stream, Loader=Loader):
        """
        Parse a YAML stream and produce parsing events.
        """
        loader = Loader(stream)
        try:
            while loader.check_event():
                yield loader.get_event()
        finally:
            loader.dispose()
    
    def compose(stream, Loader=Loader):
        """
        Parse the first YAML document in a stream
        and produce the corresponding representation tree.
        """
        loader = Loader(stream)
        try:
            return loader.get_single_node()
        finally:
            loader.dispose()
    
    def compose_all(stream, Loader=Loader):
        """
        Parse all YAML documents in a stream
        and produce corresponding representation trees.
        """
        loader = Loader(stream)
        try:
            while loader.check_node():
                yield loader.get_node()
        finally:
            loader.dispose()
    
    def load(stream, Loader=Loader):
        """
        Parse the first YAML document in a stream
        and produce the corresponding Python object.
        """
        loader = Loader(stream)
        try:
            return loader.get_single_data()
        finally:
            loader.dispose()
    
    def load_all(stream, Loader=Loader):
        """
        Parse all YAML documents in a stream
        and produce corresponding Python objects.
        """
        loader = Loader(stream)
        try:
            while loader.check_data():
                yield loader.get_data()
        finally:
            loader.dispose()
    
    def safe_load(stream):
        """
        Parse the first YAML document in a stream
        and produce the corresponding Python object.
        Resolve only basic YAML tags.
        """
        return load(stream, SafeLoader)
    
    def safe_load_all(stream):
        """
        Parse all YAML documents in a stream
        and produce corresponding Python objects.
        Resolve only basic YAML tags.
        """
        return load_all(stream, SafeLoader)
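
    # Quick usage sketch (illustrative, not part of the original module):
    #
    #     >>> safe_load("a: 1\nb: [2, 3]\n")
    #     {'a': 1, 'b': [2, 3]}
    #     >>> list(safe_load_all("---\n1\n---\n2\n"))
    #     [1, 2]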
    
    def emit(events, stream=None, Dumper=Dumper,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None):
        """
        Emit YAML parsing events into a stream.
        If stream is None, return the produced string instead.
        """
        getvalue = None
        if stream is None:
            stream = io.StringIO()
            getvalue = stream.getvalue
        dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break)
        try:
            for event in events:
                dumper.emit(event)
        finally:
            dumper.dispose()
        if getvalue:
            return getvalue()
    
    def serialize_all(nodes, stream=None, Dumper=Dumper,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        """
        Serialize a sequence of representation trees into a YAML stream.
        If stream is None, return the produced string instead.
        """
        getvalue = None
        if stream is None:
            if encoding is None:
                stream = io.StringIO()
            else:
                stream = io.BytesIO()
            getvalue = stream.getvalue
        dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break,
                encoding=encoding, version=version, tags=tags,
                explicit_start=explicit_start, explicit_end=explicit_end)
        try:
            dumper.open()
            for node in nodes:
                dumper.serialize(node)
            dumper.close()
        finally:
            dumper.dispose()
        if getvalue:
            return getvalue()
    
    def serialize(node, stream=None, Dumper=Dumper, **kwds):
        """
        Serialize a representation tree into a YAML stream.
        If stream is None, return the produced string instead.
        """
        return serialize_all([node], stream, Dumper=Dumper, **kwds)
    
    def dump_all(documents, stream=None, Dumper=Dumper,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        """
        Serialize a sequence of Python objects into a YAML stream.
        If stream is None, return the produced string instead.
        """
        getvalue = None
        if stream is None:
            if encoding is None:
                stream = io.StringIO()
            else:
                stream = io.BytesIO()
            getvalue = stream.getvalue
        dumper = Dumper(stream, default_style=default_style,
                default_flow_style=default_flow_style,
                canonical=canonical, indent=indent, width=width,
                allow_unicode=allow_unicode, line_break=line_break,
                encoding=encoding, version=version, tags=tags,
                explicit_start=explicit_start, explicit_end=explicit_end)
        try:
            dumper.open()
            for data in documents:
                dumper.represent(data)
            dumper.close()
        finally:
            dumper.dispose()
        if getvalue:
            return getvalue()
    
    def dump(data, stream=None, Dumper=Dumper, **kwds):
        """
        Serialize a Python object into a YAML stream.
        If stream is None, return the produced string instead.
        """
        return dump_all([data], stream, Dumper=Dumper, **kwds)
    
    def safe_dump_all(documents, stream=None, **kwds):
        """
        Serialize a sequence of Python objects into a YAML stream.
        Produce only basic YAML tags.
        If stream is None, return the produced string instead.
        """
        return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
    
    def safe_dump(data, stream=None, **kwds):
        """
        Serialize a Python object into a YAML stream.
        Produce only basic YAML tags.
        If stream is None, return the produced string instead.
        """
        return dump_all([data], stream, Dumper=SafeDumper, **kwds)
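
    # Round-trip sketch (illustrative):
    #
    #     text = safe_dump({'a': 1, 'b': [2, 3]}, default_flow_style=False)
    #     assert safe_load(text) == {'a': 1, 'b': [2, 3]}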
    
    def add_implicit_resolver(tag, regexp, first=None,
            Loader=Loader, Dumper=Dumper):
        """
        Add an implicit scalar detector.
        If an implicit scalar value matches the given regexp,
        the corresponding tag is assigned to the scalar.
        first is a sequence of possible initial characters or None.
        """
        Loader.add_implicit_resolver(tag, regexp, first)
        Dumper.add_implicit_resolver(tag, regexp, first)
    
    def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
        """
        Add a path based resolver for the given tag.
        A path is a list of keys that forms a path
        to a node in the representation tree.
        Keys can be string values, integers, or None.
        """
        Loader.add_path_resolver(tag, path, kind)
        Dumper.add_path_resolver(tag, path, kind)
    
    def add_constructor(tag, constructor, Loader=Loader):
        """
        Add a constructor for the given tag.
        Constructor is a function that accepts a Loader instance
        and a node object and produces the corresponding Python object.
        """
        Loader.add_constructor(tag, constructor)
    
    def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
        """
        Add a multi-constructor for the given tag prefix.
        Multi-constructor is called for a node if its tag starts with tag_prefix.
        Multi-constructor accepts a Loader instance, a tag suffix,
        and a node object and produces the corresponding Python object.
        """
        Loader.add_multi_constructor(tag_prefix, multi_constructor)
    
    def add_representer(data_type, representer, Dumper=Dumper):
        """
        Add a representer for the given type.
        Representer is a function accepting a Dumper instance
        and an instance of the given data type
        and producing the corresponding representation node.
        """
        Dumper.add_representer(data_type, representer)
    
    def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
        """
        Add a representer for the given type.
        Multi-representer is a function accepting a Dumper instance
        and an instance of the given data type or subtype
        and producing the corresponding representation node.
        """
        Dumper.add_multi_representer(data_type, multi_representer)
    
    class YAMLObjectMetaclass(type):
        """
        The metaclass for YAMLObject.
        """
        def __init__(cls, name, bases, kwds):
            super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
            if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
                cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
                cls.yaml_dumper.add_representer(cls, cls.to_yaml)
    
    class YAMLObject(metaclass=YAMLObjectMetaclass):
        """
        An object that can dump itself to a YAML stream
        and load itself from a YAML stream.
        """
    
        __slots__ = ()  # no direct instantiation, so allow immutable subclasses
    
        yaml_loader = Loader
        yaml_dumper = Dumper
    
        yaml_tag = None
        yaml_flow_style = None
    
        @classmethod
        def from_yaml(cls, loader, node):
            """
            Convert a representation node to a Python object.
            """
            return loader.construct_yaml_object(node, cls)
    
        @classmethod
        def to_yaml(cls, dumper, data):
            """
            Convert a Python object to a representation node.
            """
            return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
                    flow_style=cls.yaml_flow_style)
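
    # Minimal YAMLObject subclass (illustrative; the tag and attributes are
    # made up for the example):
    #
    #     class Monster(YAMLObject):
    #         yaml_tag = '!Monster'
    #
    #     load("!Monster {name: Dragon, hp: 40}")   # -> Monster instance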
    
    usr/lib64/python3.6/xml/__init__.py
    """Core XML support for Python.
    
    This package contains four sub-packages:
    
    dom -- The W3C Document Object Model.  This supports DOM Level 1 +
           Namespaces.
    
    parsers -- Python wrappers for XML parsers (currently only supports Expat).
    
    sax -- The Simple API for XML, developed by XML-Dev, led by David
           Megginson and ported to Python by Lars Marius Garshol.  This
           supports the SAX 2 API.
    
    etree -- The ElementTree XML library.  This is a subset of the full
           ElementTree XML release.
    
    """
    
    
    __all__ = ["dom", "parsers", "sax", "etree"]
    usr/lib64/python3.11/tomllib/__init__.py
    # SPDX-License-Identifier: MIT
    # SPDX-FileCopyrightText: 2021 Taneli Hukkinen
    # Licensed to PSF under a Contributor Agreement.
    
    __all__ = ("loads", "load", "TOMLDecodeError")
    
    from ._parser import TOMLDecodeError, load, loads
    
    # Pretend this exception was created here.
    TOMLDecodeError.__module__ = __name__
    usr/lib/python3.6/site-packages/slip/__init__.py
    usr/lib64/python3.6/site-packages/lxml/__init__.py
    # this is a package
    
    def get_include():
        """
        Returns a list of header include paths (for lxml itself, libxml2
        and libxslt) needed to compile C code against lxml if it was built
        with statically linked libraries.
        """
        import os
        lxml_path = __path__[0]
        include_path = os.path.join(lxml_path, 'includes')
        includes = [include_path, lxml_path]
    
        for name in os.listdir(include_path):
            path = os.path.join(include_path, name)
            if os.path.isdir(path):
                includes.append(path)
    
        return includes
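
    # Usage sketch (illustrative; 'mymod' is a made-up extension name):
    #
    #     from setuptools import Extension
    #     ext = Extension('mymod', ['mymod.c'], include_dirs=get_include())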
    
    usr/lib64/python3.6/html/__init__.py
    """
    General functions for HTML manipulation.
    """
    
    import re as _re
    from html.entities import html5 as _html5
    
    
    __all__ = ['escape', 'unescape']
    
    
    def escape(s, quote=True):
        """
        Replace special characters "&", "<" and ">" to HTML-safe sequences.
        If the optional flag quote is true (the default), the quotation mark
        characters, both double quote (") and single quote (') characters are also
        translated.
        """
        s = s.replace("&", "&") # Must be done first!
        s = s.replace("<", "<")
        s = s.replace(">", ">")
        if quote:
            s = s.replace('"', """)
            s = s.replace('\'', "'")
        return s
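
    # Illustrative doctest:
    #
    #     >>> escape('<a href="x">&</a>')
    #     '&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'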
    
    
    # see http://www.w3.org/TR/html5/syntax.html#tokenizing-character-references
    
    _invalid_charrefs = {
        0x00: '\ufffd',  # REPLACEMENT CHARACTER
        0x0d: '\r',      # CARRIAGE RETURN
        0x80: '\u20ac',  # EURO SIGN
        0x81: '\x81',    # <control>
        0x82: '\u201a',  # SINGLE LOW-9 QUOTATION MARK
        0x83: '\u0192',  # LATIN SMALL LETTER F WITH HOOK
        0x84: '\u201e',  # DOUBLE LOW-9 QUOTATION MARK
        0x85: '\u2026',  # HORIZONTAL ELLIPSIS
        0x86: '\u2020',  # DAGGER
        0x87: '\u2021',  # DOUBLE DAGGER
        0x88: '\u02c6',  # MODIFIER LETTER CIRCUMFLEX ACCENT
        0x89: '\u2030',  # PER MILLE SIGN
        0x8a: '\u0160',  # LATIN CAPITAL LETTER S WITH CARON
        0x8b: '\u2039',  # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
        0x8c: '\u0152',  # LATIN CAPITAL LIGATURE OE
        0x8d: '\x8d',    # <control>
        0x8e: '\u017d',  # LATIN CAPITAL LETTER Z WITH CARON
        0x8f: '\x8f',    # <control>
        0x90: '\x90',    # <control>
        0x91: '\u2018',  # LEFT SINGLE QUOTATION MARK
        0x92: '\u2019',  # RIGHT SINGLE QUOTATION MARK
        0x93: '\u201c',  # LEFT DOUBLE QUOTATION MARK
        0x94: '\u201d',  # RIGHT DOUBLE QUOTATION MARK
        0x95: '\u2022',  # BULLET
        0x96: '\u2013',  # EN DASH
        0x97: '\u2014',  # EM DASH
        0x98: '\u02dc',  # SMALL TILDE
        0x99: '\u2122',  # TRADE MARK SIGN
        0x9a: '\u0161',  # LATIN SMALL LETTER S WITH CARON
        0x9b: '\u203a',  # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
        0x9c: '\u0153',  # LATIN SMALL LIGATURE OE
        0x9d: '\x9d',    # <control>
        0x9e: '\u017e',  # LATIN SMALL LETTER Z WITH CARON
        0x9f: '\u0178',  # LATIN CAPITAL LETTER Y WITH DIAERESIS
    }
    
    _invalid_codepoints = {
        # 0x0001 to 0x0008
        0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8,
        # 0x000E to 0x001F
        0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
        0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
        # 0x007F to 0x009F
        0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a,
        0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
        0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
        # 0xFDD0 to 0xFDEF
        0xfdd0, 0xfdd1, 0xfdd2, 0xfdd3, 0xfdd4, 0xfdd5, 0xfdd6, 0xfdd7, 0xfdd8,
        0xfdd9, 0xfdda, 0xfddb, 0xfddc, 0xfddd, 0xfdde, 0xfddf, 0xfde0, 0xfde1,
        0xfde2, 0xfde3, 0xfde4, 0xfde5, 0xfde6, 0xfde7, 0xfde8, 0xfde9, 0xfdea,
        0xfdeb, 0xfdec, 0xfded, 0xfdee, 0xfdef,
        # others
        0xb, 0xfffe, 0xffff, 0x1fffe, 0x1ffff, 0x2fffe, 0x2ffff, 0x3fffe, 0x3ffff,
        0x4fffe, 0x4ffff, 0x5fffe, 0x5ffff, 0x6fffe, 0x6ffff, 0x7fffe, 0x7ffff,
        0x8fffe, 0x8ffff, 0x9fffe, 0x9ffff, 0xafffe, 0xaffff, 0xbfffe, 0xbffff,
        0xcfffe, 0xcffff, 0xdfffe, 0xdffff, 0xefffe, 0xeffff, 0xffffe, 0xfffff,
        0x10fffe, 0x10ffff
    }
    
    
    def _replace_charref(s):
        s = s.group(1)
        if s[0] == '#':
            # numeric charref
            if s[1] in 'xX':
                num = int(s[2:].rstrip(';'), 16)
            else:
                num = int(s[1:].rstrip(';'))
            if num in _invalid_charrefs:
                return _invalid_charrefs[num]
            if 0xD800 <= num <= 0xDFFF or num > 0x10FFFF:
                return '\uFFFD'
            if num in _invalid_codepoints:
                return ''
            return chr(num)
        else:
            # named charref
            if s in _html5:
                return _html5[s]
            # find the longest matching name (as defined by the standard)
            for x in range(len(s)-1, 1, -1):
                if s[:x] in _html5:
                    return _html5[s[:x]] + s[x:]
            else:
                return '&' + s
    
    
    _charref = _re.compile(r'&(#[0-9]+;?'
                           r'|#[xX][0-9a-fA-F]+;?'
                           r'|[^\t\n\f <&#;]{1,32};?)')
    
    def unescape(s):
        """
        Convert all named and numeric character references (e.g. &gt;, &#62;,
        &x3e;) in the string s to the corresponding unicode characters.
        This function uses the rules defined by the HTML 5 standard
        for both valid and invalid character references, and the list of
        HTML 5 named character references defined in html.entities.html5.
        """
        if '&' not in s:
            return s
        return _charref.sub(_replace_charref, s)
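
    # Illustrative doctest:
    #
    #     >>> unescape('&gt; &#62; &#x3e;')
    #     '> > >'
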
    usr/lib64/python3.6/json/__init__.py
    r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
    JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
    interchange format.
    
    :mod:`json` exposes an API familiar to users of the standard library
    :mod:`marshal` and :mod:`pickle` modules.  It is derived from a
    version of the externally maintained simplejson library.
    
    Encoding basic Python object hierarchies::
    
        >>> import json
        >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
        '["foo", {"bar": ["baz", null, 1.0, 2]}]'
        >>> print(json.dumps("\"foo\bar"))
        "\"foo\bar"
        >>> print(json.dumps('\u1234'))
        "\u1234"
        >>> print(json.dumps('\\'))
        "\\"
        >>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
        {"a": 0, "b": 0, "c": 0}
        >>> from io import StringIO
        >>> io = StringIO()
        >>> json.dump(['streaming API'], io)
        >>> io.getvalue()
        '["streaming API"]'
    
    Compact encoding::
    
        >>> import json
        >>> from collections import OrderedDict
        >>> mydict = OrderedDict([('4', 5), ('6', 7)])
        >>> json.dumps([1,2,3,mydict], separators=(',', ':'))
        '[1,2,3,{"4":5,"6":7}]'
    
    Pretty printing::
    
        >>> import json
        >>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4))
        {
            "4": 5,
            "6": 7
        }
    
    Decoding JSON::
    
        >>> import json
        >>> obj = ['foo', {'bar': ['baz', None, 1.0, 2]}]
        >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
        True
        >>> json.loads('"\\"foo\\bar"') == '"foo\x08ar'
        True
        >>> from io import StringIO
        >>> io = StringIO('["streaming API"]')
        >>> json.load(io)[0] == 'streaming API'
        True
    
    Specializing JSON object decoding::
    
        >>> import json
        >>> def as_complex(dct):
        ...     if '__complex__' in dct:
        ...         return complex(dct['real'], dct['imag'])
        ...     return dct
        ...
        >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
        ...     object_hook=as_complex)
        (1+2j)
        >>> from decimal import Decimal
        >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
        True
    
    Specializing JSON object encoding::
    
        >>> import json
        >>> def encode_complex(obj):
        ...     if isinstance(obj, complex):
        ...         return [obj.real, obj.imag]
        ...     raise TypeError(repr(obj) + " is not JSON serializable")
        ...
        >>> json.dumps(2 + 1j, default=encode_complex)
        '[2.0, 1.0]'
        >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
        '[2.0, 1.0]'
        >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
        '[2.0, 1.0]'
    
    
    Using json.tool from the shell to validate and pretty-print::
    
        $ echo '{"json":"obj"}' | python -m json.tool
        {
            "json": "obj"
        }
        $ echo '{ 1.2:3.4}' | python -m json.tool
        Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
    """
    __version__ = '2.0.9'
    __all__ = [
        'dump', 'dumps', 'load', 'loads',
        'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
    ]
    
    __author__ = 'Bob Ippolito <bob@redivi.com>'
    
    from .decoder import JSONDecoder, JSONDecodeError
    from .encoder import JSONEncoder
    import codecs
    
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        default=None,
    )
    
    def dump(obj, fp, *, skipkeys=False, ensure_ascii=True, check_circular=True,
            allow_nan=True, cls=None, indent=None, separators=None,
            default=None, sort_keys=False, **kw):
        """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
        ``.write()``-supporting file-like object).
    
        If ``skipkeys`` is true then ``dict`` keys that are not basic types
        (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
        instead of raising a ``TypeError``.
    
        If ``ensure_ascii`` is false, then the strings written to ``fp`` can
        contain non-ASCII characters if they appear in strings contained in
        ``obj``. Otherwise, all such characters are escaped in JSON strings.
    
        If ``check_circular`` is false, then the circular reference check
        for container types will be skipped and a circular reference will
        result in an ``OverflowError`` (or worse).
    
        If ``allow_nan`` is false, then it will be a ``ValueError`` to
        serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
        in strict compliance of the JSON specification, instead of using the
        JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    
        If ``indent`` is a non-negative integer, then JSON array elements and
        object members will be pretty-printed with that indent level. An indent
        level of 0 will only insert newlines. ``None`` is the most compact
        representation.
    
        If specified, ``separators`` should be an ``(item_separator, key_separator)``
        tuple.  The default is ``(', ', ': ')`` if *indent* is ``None`` and
        ``(',', ': ')`` otherwise.  To get the most compact JSON representation,
        you should specify ``(',', ':')`` to eliminate whitespace.
    
        ``default(obj)`` is a function that should return a serializable version
        of obj or raise TypeError. The default simply raises TypeError.
    
        If *sort_keys* is true (default: ``False``), then the output of
        dictionaries will be sorted by key.
    
        To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
        ``.default()`` method to serialize additional types), specify it with
        the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
    
        """
        # cached encoder
        if (not skipkeys and ensure_ascii and
            check_circular and allow_nan and
            cls is None and indent is None and separators is None and
            default is None and not sort_keys and not kw):
            iterable = _default_encoder.iterencode(obj)
        else:
            if cls is None:
                cls = JSONEncoder
            iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
                check_circular=check_circular, allow_nan=allow_nan, indent=indent,
                separators=separators,
                default=default, sort_keys=sort_keys, **kw).iterencode(obj)
        # could accelerate with writelines in some versions of Python, at
        # a debuggability cost
        for chunk in iterable:
            fp.write(chunk)
    
    
    def dumps(obj, *, skipkeys=False, ensure_ascii=True, check_circular=True,
            allow_nan=True, cls=None, indent=None, separators=None,
            default=None, sort_keys=False, **kw):
        """Serialize ``obj`` to a JSON formatted ``str``.
    
        If ``skipkeys`` is true then ``dict`` keys that are not basic types
        (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
        instead of raising a ``TypeError``.
    
        If ``ensure_ascii`` is false, then the return value can contain non-ASCII
        characters if they appear in strings contained in ``obj``. Otherwise, all
        such characters are escaped in JSON strings.
    
        If ``check_circular`` is false, then the circular reference check
        for container types will be skipped and a circular reference will
        result in an ``OverflowError`` (or worse).
    
        If ``allow_nan`` is false, then it will be a ``ValueError`` to
        serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
        strict compliance of the JSON specification, instead of using the
        JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    
        If ``indent`` is a non-negative integer, then JSON array elements and
        object members will be pretty-printed with that indent level. An indent
        level of 0 will only insert newlines. ``None`` is the most compact
        representation.
    
        If specified, ``separators`` should be an ``(item_separator, key_separator)``
        tuple.  The default is ``(', ', ': ')`` if *indent* is ``None`` and
        ``(',', ': ')`` otherwise.  To get the most compact JSON representation,
        you should specify ``(',', ':')`` to eliminate whitespace.
    
        ``default(obj)`` is a function that should return a serializable version
        of obj or raise TypeError. The default simply raises TypeError.
    
        If *sort_keys* is true (default: ``False``), then the output of
        dictionaries will be sorted by key.
    
        To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
        ``.default()`` method to serialize additional types), specify it with
        the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
    
        """
        # cached encoder
        if (not skipkeys and ensure_ascii and
            check_circular and allow_nan and
            cls is None and indent is None and separators is None and
            default is None and not sort_keys and not kw):
            return _default_encoder.encode(obj)
        if cls is None:
            cls = JSONEncoder
        return cls(
            skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators, default=default, sort_keys=sort_keys,
            **kw).encode(obj)
    
    
    _default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None)
    
    
    def detect_encoding(b):
        bstartswith = b.startswith
        if bstartswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)):
            return 'utf-32'
        if bstartswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)):
            return 'utf-16'
        if bstartswith(codecs.BOM_UTF8):
            return 'utf-8-sig'
    
        if len(b) >= 4:
            if not b[0]:
                # 00 00 -- -- - utf-32-be
                # 00 XX -- -- - utf-16-be
                return 'utf-16-be' if b[1] else 'utf-32-be'
            if not b[1]:
                # XX 00 00 00 - utf-32-le
                # XX 00 00 XX - utf-16-le
                # XX 00 XX -- - utf-16-le
                return 'utf-16-le' if b[2] or b[3] else 'utf-32-le'
        elif len(b) == 2:
            if not b[0]:
                # 00 XX - utf-16-be
                return 'utf-16-be'
            if not b[1]:
                # XX 00 - utf-16-le
                return 'utf-16-le'
        # default
        return 'utf-8'
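
    # Illustrative checks (not from the original source):
    #
    #     >>> detect_encoding(b'\x00\x00\x00{')
    #     'utf-32-be'
    #     >>> detect_encoding(b'{\x00')
    #     'utf-16-le'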
    
    
    def load(fp, *, cls=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
        """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
        a JSON document) to a Python object.
    
        ``object_hook`` is an optional function that will be called with the
        result of any object literal decode (a ``dict``). The return value of
        ``object_hook`` will be used instead of the ``dict``. This feature
        can be used to implement custom decoders (e.g. JSON-RPC class hinting).
    
        ``object_pairs_hook`` is an optional function that will be called with the
        result of any object literal decoded with an ordered list of pairs.  The
        return value of ``object_pairs_hook`` will be used instead of the ``dict``.
        This feature can be used to implement custom decoders that rely on the
        order that the key and value pairs are decoded (for example,
        collections.OrderedDict will remember the order of insertion). If
        ``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.
    
        To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
        kwarg; otherwise ``JSONDecoder`` is used.
    
        """
        return loads(fp.read(),
            cls=cls, object_hook=object_hook,
            parse_float=parse_float, parse_int=parse_int,
            parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)
    
    
    def loads(s, *, encoding=None, cls=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
        """Deserialize ``s`` (a ``str``, ``bytes`` or ``bytearray`` instance
        containing a JSON document) to a Python object.
    
        ``object_hook`` is an optional function that will be called with the
        result of any object literal decode (a ``dict``). The return value of
        ``object_hook`` will be used instead of the ``dict``. This feature
        can be used to implement custom decoders (e.g. JSON-RPC class hinting).
    
        ``object_pairs_hook`` is an optional function that will be called with the
        result of any object literal decoded with an ordered list of pairs.  The
        return value of ``object_pairs_hook`` will be used instead of the ``dict``.
        This feature can be used to implement custom decoders that rely on the
        order that the key and value pairs are decoded (for example,
        collections.OrderedDict will remember the order of insertion). If
        ``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.
    
        ``parse_float``, if specified, will be called with the string
        of every JSON float to be decoded. By default this is equivalent to
        float(num_str). This can be used to use another datatype or parser
        for JSON floats (e.g. decimal.Decimal).
    
        ``parse_int``, if specified, will be called with the string
        of every JSON int to be decoded. By default this is equivalent to
        int(num_str). This can be used to use another datatype or parser
        for JSON integers (e.g. float).
    
        ``parse_constant``, if specified, will be called with one of the
        following strings: -Infinity, Infinity, NaN.
        This can be used to raise an exception if invalid JSON numbers
        are encountered.
    
        To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
        kwarg; otherwise ``JSONDecoder`` is used.
    
        The ``encoding`` argument is ignored and deprecated.
    
        """
        if isinstance(s, str):
            if s.startswith('\ufeff'):
                raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)",
                                      s, 0)
        else:
            if not isinstance(s, (bytes, bytearray)):
                raise TypeError('the JSON object must be str, bytes or bytearray, '
                                'not {!r}'.format(s.__class__.__name__))
            s = s.decode(detect_encoding(s), 'surrogatepass')
    
        if (cls is None and object_hook is None and
                parse_int is None and parse_float is None and
                parse_constant is None and object_pairs_hook is None and not kw):
            return _default_decoder.decode(s)
        if cls is None:
            cls = JSONDecoder
        if object_hook is not None:
            kw['object_hook'] = object_hook
        if object_pairs_hook is not None:
            kw['object_pairs_hook'] = object_pairs_hook
        if parse_float is not None:
            kw['parse_float'] = parse_float
        if parse_int is not None:
            kw['parse_int'] = parse_int
        if parse_constant is not None:
            kw['parse_constant'] = parse_constant
        return cls(**kw).decode(s)
    usr/lib/python3.6/site-packages/firewall/__init__.py
    usr/lib/python3.6/site-packages/pkg_resources/__init__.py
    # coding: utf-8
    """
    Package resource API
    --------------------
    
    A resource is a logical file contained within a package, or a logical
    subdirectory thereof.  The package resource API expects resource names
    to have their path parts separated with ``/``, *not* whatever the local
    path separator is.  Do not use os.path operations to manipulate resource
    names being passed into the API.
    
    The package resource API is designed to work with normal filesystem packages,
    .egg files, and unpacked .egg files.  It can also work in a limited way with
    .zip files and with custom PEP 302 loaders that support the ``get_data()``
    method.
    """
    
    from __future__ import absolute_import
    
    import sys
    import os
    import io
    import time
    import re
    import types
    import zipfile
    import zipimport
    import warnings
    import stat
    import functools
    import pkgutil
    import operator
    import platform
    import collections
    import plistlib
    import email.parser
    import errno
    import tempfile
    import textwrap
    import itertools
    import inspect
    from pkgutil import get_importer
    
    try:
        import _imp
    except ImportError:
        # Python 3.2 compatibility
        import imp as _imp
    
    from pkg_resources.extern import six
    from pkg_resources.extern.six.moves import urllib, map, filter
    
    # capture these to bypass sandboxing
    from os import utime
    try:
        from os import mkdir, rename, unlink
        WRITE_SUPPORT = True
    except ImportError:
        # no write support, probably under GAE
        WRITE_SUPPORT = False
    
    from os import open as os_open
    from os.path import isdir, split
    
    try:
        import importlib.machinery as importlib_machinery
        # access attribute to force import under delayed import mechanisms.
        importlib_machinery.__name__
    except ImportError:
        importlib_machinery = None
    
    from . import py31compat
    from pkg_resources.extern import appdirs
    from pkg_resources.extern import packaging
    __import__('pkg_resources.extern.packaging.version')
    __import__('pkg_resources.extern.packaging.specifiers')
    __import__('pkg_resources.extern.packaging.requirements')
    __import__('pkg_resources.extern.packaging.markers')
    
    
    if (3, 0) < sys.version_info < (3, 3):
        raise RuntimeError("Python 3.3 or later is required")
    
    if six.PY2:
        # Those builtin exceptions are only defined in Python 3
        PermissionError = None
        NotADirectoryError = None
    
    # declare some globals that will be defined later to
    # satisfy the linters.
    require = None
    working_set = None
    add_activation_listener = None
    resources_stream = None
    cleanup_resources = None
    resource_dir = None
    resource_stream = None
    set_extraction_path = None
    resource_isdir = None
    resource_string = None
    iter_entry_points = None
    resource_listdir = None
    resource_filename = None
    resource_exists = None
    _distribution_finders = None
    _namespace_handlers = None
    _namespace_packages = None
    
    
    class PEP440Warning(RuntimeWarning):
        """
        Used when there is an issue with a version or specifier not complying with
        PEP 440.
        """
    
    
    def parse_version(v):
        try:
            return packaging.version.Version(v)
        except packaging.version.InvalidVersion:
            return packaging.version.LegacyVersion(v)
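
    # Illustrative behavior: PEP 440 strings parse strictly; anything else
    # falls back to LegacyVersion, which sorts below every PEP 440 release.
    #
    #     parse_version('1.0.post1')       # packaging Version
    #     parse_version('not.a.version!')  # LegacyVersion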
    
    
    _state_vars = {}
    
    
    def _declare_state(vartype, **kw):
        globals().update(kw)
        _state_vars.update(dict.fromkeys(kw, vartype))
    
    
    def __getstate__():
        state = {}
        g = globals()
        for k, v in _state_vars.items():
            state[k] = g['_sget_' + v](g[k])
        return state
    
    
    def __setstate__(state):
        g = globals()
        for k, v in state.items():
            g['_sset_' + _state_vars[k]](k, g[k], v)
        return state
    
    
    def _sget_dict(val):
        return val.copy()
    
    
    def _sset_dict(key, ob, state):
        ob.clear()
        ob.update(state)
    
    
    def _sget_object(val):
        return val.__getstate__()
    
    
    def _sset_object(key, ob, state):
        ob.__setstate__(state)
    
    
    _sget_none = _sset_none = lambda *args: None
    
    
    def get_supported_platform():
        """Return this platform's maximum compatible version.
    
        distutils.util.get_platform() normally reports the minimum version
        of Mac OS X that would be required to *use* extensions produced by
        distutils.  But what we want when checking compatibility is to know the
        version of Mac OS X that we are *running*.  To allow usage of packages that
        explicitly require a newer version of Mac OS X, we must also know the
        current version of the OS.
    
        If this condition occurs for any other platform with a version in its
        platform strings, this function should be extended accordingly.
        """
        plat = get_build_platform()
        m = macosVersionString.match(plat)
        if m is not None and sys.platform == "darwin":
            try:
                plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
            except ValueError:
                # not Mac OS X
                pass
        return plat
    
    
    __all__ = [
        # Basic resource access and distribution/entry point discovery
        'require', 'run_script', 'get_provider', 'get_distribution',
        'load_entry_point', 'get_entry_map', 'get_entry_info',
        'iter_entry_points',
        'resource_string', 'resource_stream', 'resource_filename',
        'resource_listdir', 'resource_exists', 'resource_isdir',
    
        # Environmental control
        'declare_namespace', 'working_set', 'add_activation_listener',
        'find_distributions', 'set_extraction_path', 'cleanup_resources',
        'get_default_cache',
    
        # Primary implementation classes
        'Environment', 'WorkingSet', 'ResourceManager',
        'Distribution', 'Requirement', 'EntryPoint',
    
        # Exceptions
        'ResolutionError', 'VersionConflict', 'DistributionNotFound',
        'UnknownExtra', 'ExtractionError',
    
        # Warnings
        'PEP440Warning',
    
        # Parsing functions and string utilities
        'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
        'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
        'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
    
        # filesystem utilities
        'ensure_directory', 'normalize_path',
    
        # Distribution "precedence" constants
        'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
    
        # "Provider" interfaces, implementations, and registration/lookup APIs
        'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
        'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
        'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
        'register_finder', 'register_namespace_handler', 'register_loader_type',
        'fixup_namespace_packages', 'get_importer',
    
        # Deprecated/backward compatibility only
        'run_main', 'AvailableDistributions',
    ]
    
    
    class ResolutionError(Exception):
        """Abstract base for dependency resolution errors"""
    
        def __repr__(self):
            return self.__class__.__name__ + repr(self.args)
    
    
    class VersionConflict(ResolutionError):
        """
        An already-installed version conflicts with the requested version.
    
        Should be initialized with the installed Distribution and the requested
        Requirement.
        """
    
        _template = "{self.dist} is installed but {self.req} is required"
    
        @property
        def dist(self):
            return self.args[0]
    
        @property
        def req(self):
            return self.args[1]
    
        def report(self):
            return self._template.format(**locals())
    
        def with_context(self, required_by):
            """
            If required_by is non-empty, return a version of self that is a
            ContextualVersionConflict.
            """
            if not required_by:
                return self
            args = self.args + (required_by,)
            return ContextualVersionConflict(*args)
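
    # Illustrative: when the resolver knows which requirements pulled in
    # the conflicting distribution, with_context() upgrades the error:
    #
    #     VersionConflict(dist, req).with_context({'some-plugin'})
    #     # -> ContextualVersionConflict(dist, req, {'some-plugin'})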
    
    
    class ContextualVersionConflict(VersionConflict):
        """
        A VersionConflict that accepts a third parameter, the set of the
        requirements that required the installed Distribution.
        """
    
        _template = VersionConflict._template + ' by {self.required_by}'
    
        @property
        def required_by(self):
            return self.args[2]
    
    
    class DistributionNotFound(ResolutionError):
        """A requested distribution was not found"""
    
        _template = ("The '{self.req}' distribution was not found "
                     "and is required by {self.requirers_str}")
    
        @property
        def req(self):
            return self.args[0]
    
        @property
        def requirers(self):
            return self.args[1]
    
        @property
        def requirers_str(self):
            if not self.requirers:
                return 'the application'
            return ', '.join(self.requirers)
    
        def report(self):
            return self._template.format(**locals())
    
        def __str__(self):
            return self.report()
    
    
    class UnknownExtra(ResolutionError):
        """Distribution doesn't have an "extra feature" of the given name"""
    
    
    _provider_factories = {}
    
    PY_MAJOR = sys.version[:3]
    EGG_DIST = 3
    BINARY_DIST = 2
    SOURCE_DIST = 1
    CHECKOUT_DIST = 0
    DEVELOP_DIST = -1
    
    
    def register_loader_type(loader_type, provider_factory):
        """Register `provider_factory` to make providers for `loader_type`
    
        `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
        and `provider_factory` is a function that, passed a *module* object,
        returns an ``IResourceProvider`` for that module.
        """
        _provider_factories[loader_type] = provider_factory
    
    
    def get_provider(moduleOrReq):
        """Return an IResourceProvider for the named module or requirement"""
        if isinstance(moduleOrReq, Requirement):
            return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
        try:
            module = sys.modules[moduleOrReq]
        except KeyError:
            __import__(moduleOrReq)
            module = sys.modules[moduleOrReq]
        loader = getattr(module, '__loader__', None)
        return _find_adapter(_provider_factories, loader)(module)
    
    
    def _macosx_vers(_cache=[]):
        if not _cache:
            version = platform.mac_ver()[0]
            # fallback for MacPorts
            if version == '':
                plist = '/System/Library/CoreServices/SystemVersion.plist'
                if os.path.exists(plist):
                    if hasattr(plistlib, 'readPlist'):
                        plist_content = plistlib.readPlist(plist)
                        if 'ProductVersion' in plist_content:
                            version = plist_content['ProductVersion']
    
            _cache.append(version.split('.'))
        return _cache[0]
    
    
    def _macosx_arch(machine):
        return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
    
    
    def get_build_platform():
        """Return this platform's string for platform-specific distributions
    
        XXX Currently this is the same as ``distutils.util.get_platform()``, but it
        needs some hacks for Linux and Mac OS X.
        """
        from sysconfig import get_platform
    
        plat = get_platform()
        if sys.platform == "darwin" and not plat.startswith('macosx-'):
            try:
                version = _macosx_vers()
                machine = os.uname()[4].replace(" ", "_")
                return "macosx-%d.%d-%s" % (
                    int(version[0]), int(version[1]),
                    _macosx_arch(machine),
                )
            except ValueError:
                # if someone is running a non-Mac darwin system, this will fall
                # through to the default implementation
                pass
        return plat
    
    
    macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
    darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
    # XXX backward compat
    get_platform = get_build_platform
    
    
    def compatible_platforms(provided, required):
        """Can code for the `provided` platform run on the `required` platform?
    
        Returns true if either platform is ``None``, or the platforms are equal.
    
        XXX Needs compatibility checks for Linux and other unixy OSes.
        """
        if provided is None or required is None or provided == required:
            # easy case
            return True
    
        # Mac OS X special cases
        reqMac = macosVersionString.match(required)
        if reqMac:
            provMac = macosVersionString.match(provided)
    
            # is this a Mac package?
            if not provMac:
                # this is backwards compatibility for packages built before
                # setuptools 0.6. All packages built after this point will
                # use the new macosx designation.
                provDarwin = darwinVersionString.match(provided)
                if provDarwin:
                    dversion = int(provDarwin.group(1))
                    macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                    if (dversion == 7 and macosversion >= "10.3") or \
                            (dversion == 8 and macosversion >= "10.4"):
                        return True
                # egg isn't macosx or legacy darwin
                return False
    
            # are they the same major version and machine type?
            if provMac.group(1) != reqMac.group(1) or \
                    provMac.group(3) != reqMac.group(3):
                return False
    
            # is the required OS major update >= the provided one?
            if int(provMac.group(2)) > int(reqMac.group(2)):
                return False
    
            return True
    
        # XXX Linux and other platforms' special cases should go here
        return False
    
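    # A few illustrative outcomes of the rules above (doctest-style; pure
    # distributions carry a platform of None):
    #
    #   compatible_platforms(None, 'macosx-10.9-x86_64')            -> True
    #   compatible_platforms('macosx-10.6-x86_64',
    #                        'macosx-10.9-x86_64')                  -> True
    #   compatible_platforms('macosx-10.9-ppc',
    #                        'macosx-10.9-x86_64')                  -> False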
    
    def run_script(dist_spec, script_name):
        """Locate distribution `dist_spec` and run its `script_name` script"""
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        require(dist_spec)[0].run_script(script_name, ns)
    
    
    # backward compatibility
    run_main = run_script
    
    
    def get_distribution(dist):
        """Return a current distribution object for a Requirement or string"""
        if isinstance(dist, six.string_types):
            dist = Requirement.parse(dist)
        if isinstance(dist, Requirement):
            dist = get_provider(dist)
        if not isinstance(dist, Distribution):
            raise TypeError("Expected string, Requirement, or Distribution", dist)
        return dist
    
    
    def load_entry_point(dist, group, name):
        """Return `name` entry point of `group` for `dist` or raise ImportError"""
        return get_distribution(dist).load_entry_point(group, name)
    
    
    def get_entry_map(dist, group=None):
        """Return the entry point map for `group`, or the full entry map"""
        return get_distribution(dist).get_entry_map(group)
    
    
    def get_entry_info(dist, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``"""
        return get_distribution(dist).get_entry_info(group, name)
    
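    # Illustrative sketch (the project, group, and entry point names here are
    # hypothetical):
    #
    #   import pkg_resources
    #   main = pkg_resources.load_entry_point(
    #       'mytool', 'console_scripts', 'main')
    #   main()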
    
    class IMetadataProvider:
        def has_metadata(name):
            """Does the package's distribution contain the named metadata?"""
    
        def get_metadata(name):
            """The named metadata resource as a string"""
    
        def get_metadata_lines(name):
            """Yield named metadata resource as list of non-blank non-comment lines
    
            Leading and trailing whitespace is stripped from each line, and lines
            with ``#`` as the first non-blank character are omitted."""
    
        def metadata_isdir(name):
            """Is the named metadata a directory?  (like ``os.path.isdir()``)"""
    
        def metadata_listdir(name):
            """List of metadata names in the directory (like ``os.listdir()``)"""
    
        def run_script(script_name, namespace):
            """Execute the named script in the supplied namespace dictionary"""
    
    
    class IResourceProvider(IMetadataProvider):
        """An object that provides access to package resources"""
    
        def get_resource_filename(manager, resource_name):
            """Return a true filesystem path for `resource_name`
    
            `manager` must be an ``IResourceManager``"""
    
        def get_resource_stream(manager, resource_name):
            """Return a readable file-like object for `resource_name`
    
            `manager` must be an ``IResourceManager``"""
    
        def get_resource_string(manager, resource_name):
            """Return a string containing the contents of `resource_name`
    
            `manager` must be an ``IResourceManager``"""
    
        def has_resource(resource_name):
            """Does the package contain the named resource?"""
    
        def resource_isdir(resource_name):
            """Is the named resource a directory?  (like ``os.path.isdir()``)"""
    
        def resource_listdir(resource_name):
            """List of resource names in the directory (like ``os.listdir()``)"""
    
    
    class WorkingSet(object):
        """A collection of active distributions on sys.path (or a similar list)"""
    
        def __init__(self, entries=None):
            """Create working set from list of path entries (default=sys.path)"""
            self.entries = []
            self.entry_keys = {}
            self.by_key = {}
            self.callbacks = []
    
            if entries is None:
                entries = sys.path
    
            for entry in entries:
                self.add_entry(entry)
    
        @classmethod
        def _build_master(cls):
            """
            Prepare the master working set.
            """
            ws = cls()
            try:
                from __main__ import __requires__
            except ImportError:
                # The main program does not list any requirements
                return ws
    
            # ensure the requirements are met
            try:
                ws.require(__requires__)
            except VersionConflict:
                return cls._build_from_requirements(__requires__)
    
            return ws
    
        @classmethod
        def _build_from_requirements(cls, req_spec):
            """
            Build a working set from a requirement spec. Rewrites sys.path.
            """
            # try it without defaults already on sys.path
            # by starting with an empty path
            ws = cls([])
            reqs = parse_requirements(req_spec)
            dists = ws.resolve(reqs, Environment())
            for dist in dists:
                ws.add(dist)
    
            # add any missing entries from sys.path
            for entry in sys.path:
                if entry not in ws.entries:
                    ws.add_entry(entry)
    
            # then copy back to sys.path
            sys.path[:] = ws.entries
            return ws
    
        def add_entry(self, entry):
            """Add a path item to ``.entries``, finding any distributions on it
    
            ``find_distributions(entry, True)`` is used to find distributions
            corresponding to the path entry, and they are added.  `entry` is
            always appended to ``.entries``, even if it is already present.
            (This is because ``sys.path`` can contain the same value more than
            once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
            equal ``sys.path``.)
            """
            self.entry_keys.setdefault(entry, [])
            self.entries.append(entry)
            for dist in find_distributions(entry, True):
                self.add(dist, entry, False)
    
        def __contains__(self, dist):
            """True if `dist` is the active distribution for its project"""
            return self.by_key.get(dist.key) == dist
    
        def find(self, req):
            """Find a distribution matching requirement `req`
    
            If there is an active distribution for the requested project, this
            returns it as long as it meets the version requirement specified by
            `req`.  But, if there is an active distribution for the project and it
            does *not* meet the `req` requirement, ``VersionConflict`` is raised.
            If there is no active distribution for the requested project, ``None``
            is returned.
            """
            dist = self.by_key.get(req.key)
            if dist is not None and dist not in req:
                # XXX add more info
                raise VersionConflict(dist, req)
            return dist
    
        def iter_entry_points(self, group, name=None):
            """Yield entry point objects from `group` matching `name`
    
            If `name` is None, yields all entry points in `group` from all
            distributions in the working set, otherwise only ones matching
            both `group` and `name` are yielded (in distribution order).
            """
            for dist in self:
                entries = dist.get_entry_map(group)
                if name is None:
                    for ep in entries.values():
                        yield ep
                elif name in entries:
                    yield entries[name]
    
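        # Illustrative sketch (the entry point group is hypothetical):
        #
        #   from pkg_resources import working_set
        #   for ep in working_set.iter_entry_points('myapp.plugins'):
        #       plugin = ep.load()  # import and return the advertised object
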
        def run_script(self, requires, script_name):
            """Locate distribution for `requires` and run `script_name` script"""
            ns = sys._getframe(1).f_globals
            name = ns['__name__']
            ns.clear()
            ns['__name__'] = name
            self.require(requires)[0].run_script(script_name, ns)
    
        def __iter__(self):
            """Yield distributions for non-duplicate projects in the working set
    
            The yield order is the order in which the items' path entries were
            added to the working set.
            """
            seen = {}
            for item in self.entries:
                if item not in self.entry_keys:
                    # workaround a cache issue
                    continue
    
                for key in self.entry_keys[item]:
                    if key not in seen:
                        seen[key] = 1
                        yield self.by_key[key]
    
        def add(self, dist, entry=None, insert=True, replace=False):
            """Add `dist` to working set, associated with `entry`
    
            If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
            On exit from this routine, `entry` is added to the end of the working
            set's ``.entries`` (if it wasn't already present).
    
            `dist` is only added to the working set if it's for a project that
            doesn't already have a distribution in the set, unless `replace=True`.
            If it's added, any callbacks registered with the ``subscribe()`` method
            will be called.
            """
            if insert:
                dist.insert_on(self.entries, entry, replace=replace)
    
            if entry is None:
                entry = dist.location
            keys = self.entry_keys.setdefault(entry, [])
            keys2 = self.entry_keys.setdefault(dist.location, [])
            if not replace and dist.key in self.by_key:
                # ignore hidden distros
                return
    
            self.by_key[dist.key] = dist
            if dist.key not in keys:
                keys.append(dist.key)
            if dist.key not in keys2:
                keys2.append(dist.key)
            self._added_new(dist)
    
        def resolve(self, requirements, env=None, installer=None,
                    replace_conflicting=False, extras=None):
            """List all distributions needed to (recursively) meet `requirements`
    
            `requirements` must be a sequence of ``Requirement`` objects.  `env`,
            if supplied, should be an ``Environment`` instance.  If
            not supplied, it defaults to all distributions available within any
            entry or distribution in the working set.  `installer`, if supplied,
            will be invoked with each requirement that cannot be met by an
            already-installed distribution; it should return a ``Distribution`` or
            ``None``.
    
            Unless `replace_conflicting=True`, raises a VersionConflict exception
            if any requirements are found on the path that have the correct name
            but the wrong version.  Otherwise, if an `installer` is supplied it
            will be invoked to obtain the correct version of the requirement and
            activate it.
    
            `extras` is a list of the extras to be used with these requirements.
            This is important because extra requirements may look like `my_req;
            extra = "my_extra"`, which would otherwise be interpreted as a purely
            optional requirement.  Instead, we want to be able to assert that these
            requirements are truly required.
            """
    
            # set up the stack
            requirements = list(requirements)[::-1]
            # set of processed requirements
            processed = {}
            # key -> dist
            best = {}
            to_activate = []
    
            req_extras = _ReqExtras()
    
            # Mapping of requirement to set of distributions that required it;
            # useful for reporting info about conflicts.
            required_by = collections.defaultdict(set)
    
            while requirements:
                # process dependencies breadth-first
                req = requirements.pop(0)
                if req in processed:
                    # Ignore cyclic or redundant dependencies
                    continue
    
                if not req_extras.markers_pass(req, extras):
                    continue
    
                dist = best.get(req.key)
                if dist is None:
                    # Find the best distribution and add it to the map
                    dist = self.by_key.get(req.key)
                    if dist is None or (dist not in req and replace_conflicting):
                        ws = self
                        if env is None:
                            if dist is None:
                                env = Environment(self.entries)
                            else:
                                # Use an empty environment and workingset to avoid
                                # any further conflicts with the conflicting
                                # distribution
                                env = Environment([])
                                ws = WorkingSet([])
                        dist = best[req.key] = env.best_match(
                            req, ws, installer,
                            replace_conflicting=replace_conflicting
                        )
                        if dist is None:
                            requirers = required_by.get(req, None)
                            raise DistributionNotFound(req, requirers)
                    to_activate.append(dist)
                if dist not in req:
                    # Oops, the "best" so far conflicts with a dependency
                    dependent_req = required_by[req]
                    raise VersionConflict(dist, req).with_context(dependent_req)
    
                # push the new requirements onto the stack
                new_requirements = dist.requires(req.extras)[::-1]
                requirements.extend(new_requirements)
    
                # Register the new requirements needed by req
                for new_requirement in new_requirements:
                    required_by[new_requirement].add(req.project_name)
                    req_extras[new_requirement] = req.extras
    
                processed[req] = True
    
            # return list of distros to activate
            return to_activate
    
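        # Illustrative sketch (the requirement string is hypothetical):
        #
        #   from pkg_resources import working_set, parse_requirements
        #   reqs = parse_requirements('mytool>=1.0')
        #   for dist in working_set.resolve(reqs):
        #       working_set.add(dist)  # activate each resolved distribution
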
        def find_plugins(
                self, plugin_env, full_env=None, installer=None, fallback=True):
            """Find all activatable distributions in `plugin_env`
    
            Example usage::
    
                distributions, errors = working_set.find_plugins(
                    Environment(plugin_dirlist)
                )
                # add plugins+libs to sys.path
                map(working_set.add, distributions)
                # display errors
                print('Could not load', errors)
    
            The `plugin_env` should be an ``Environment`` instance that contains
            only distributions that are in the project's "plugin directory" or
            directories. The `full_env`, if supplied, should be an
            ``Environment`` that contains all currently-available
            distributions.  If `full_env` is not supplied, one is created
            automatically from the ``WorkingSet`` this method is called on,
            which will typically mean that every directory on ``sys.path``
            will be scanned for distributions.
    
            `installer` is a standard installer callback as used by the
            ``resolve()`` method. The `fallback` flag indicates whether we should
            attempt to resolve older versions of a plugin if the newest version
            cannot be resolved.
    
            This method returns a 2-tuple: (`distributions`, `error_info`), where
            `distributions` is a list of the distributions found in `plugin_env`
            that were loadable, along with any other distributions that are needed
            to resolve their dependencies.  `error_info` is a dictionary mapping
            unloadable plugin distributions to an exception instance describing the
            error that occurred. Usually this will be a ``DistributionNotFound`` or
            ``VersionConflict`` instance.
            """
    
            plugin_projects = list(plugin_env)
            # scan project names in alphabetic order
            plugin_projects.sort()
    
            error_info = {}
            distributions = {}
    
            if full_env is None:
                env = Environment(self.entries)
                env += plugin_env
            else:
                env = full_env + plugin_env
    
            shadow_set = self.__class__([])
            # put all our entries in shadow_set
            list(map(shadow_set.add, self))
    
            for project_name in plugin_projects:
    
                for dist in plugin_env[project_name]:
    
                    req = [dist.as_requirement()]
    
                    try:
                        resolvees = shadow_set.resolve(req, env, installer)
    
                    except ResolutionError as v:
                        # save error info
                        error_info[dist] = v
                        if fallback:
                            # try the next older version of project
                            continue
                        else:
                            # give up on this project, keep going
                            break
    
                    else:
                        list(map(shadow_set.add, resolvees))
                        distributions.update(dict.fromkeys(resolvees))
    
                        # success, no need to try any more versions of this project
                        break
    
            distributions = list(distributions)
            distributions.sort()
    
            return distributions, error_info
    
        def require(self, *requirements):
            """Ensure that distributions matching `requirements` are activated
    
            `requirements` must be a string or a (possibly-nested) sequence
            thereof, specifying the distributions and versions required.  The
            return value is a sequence of the distributions that needed to be
            activated to fulfill the requirements; all relevant distributions are
            included, even if they were already activated in this working set.
            """
            needed = self.resolve(parse_requirements(requirements))
    
            for dist in needed:
                self.add(dist)
    
            return needed
    
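        # Illustrative sketch (the requirement string is hypothetical);
        # require() resolves and activates in one step:
        #
        #   from pkg_resources import working_set
        #   for dist in working_set.require('mytool>=1.0'):
        #       print(dist.project_name, dist.version)
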
        def subscribe(self, callback, existing=True):
            """Invoke `callback` for all distributions
    
            If `existing=True` (the default), `callback` is also invoked for
            each distribution already in the working set.
            """
            if callback in self.callbacks:
                return
            self.callbacks.append(callback)
            if not existing:
                return
            for dist in self:
                callback(dist)
    
        def _added_new(self, dist):
            for callback in self.callbacks:
                callback(dist)
    
        def __getstate__(self):
            return (
                self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
                self.callbacks[:]
            )
    
        def __setstate__(self, e_k_b_c):
            entries, keys, by_key, callbacks = e_k_b_c
            self.entries = entries[:]
            self.entry_keys = keys.copy()
            self.by_key = by_key.copy()
            self.callbacks = callbacks[:]
    
    
    class _ReqExtras(dict):
        """
        Map each requirement to the extras that demanded it.
        """
    
        def markers_pass(self, req, extras=None):
            """
            Evaluate markers for req against each extra that
            demanded it.
    
            Return False if the req has a marker and fails
            evaluation. Otherwise, return True.
            """
            extra_evals = (
                req.marker.evaluate({'extra': extra})
                for extra in self.get(req, ()) + (extras or (None,))
            )
            return not req.marker or any(extra_evals)
    
    
    class Environment(object):
        """Searchable snapshot of distributions on a search path"""
    
        def __init__(
                self, search_path=None, platform=get_supported_platform(),
                python=PY_MAJOR):
            """Snapshot distributions available on a search path
    
            Any distributions found on `search_path` are added to the environment.
            `search_path` should be a sequence of ``sys.path`` items.  If not
            supplied, ``sys.path`` is used.
    
            `platform` is an optional string specifying the name of the platform
            that platform-specific distributions must be compatible with.  If
            unspecified, it defaults to the current platform.  `python` is an
            optional string naming the desired version of Python (e.g. ``'3.3'``);
            it defaults to the current version.
    
            You may explicitly set `platform` (and/or `python`) to ``None`` if you
            wish to map *all* distributions, not just those compatible with the
            running platform or Python version.
            """
            self._distmap = {}
            self.platform = platform
            self.python = python
            self.scan(search_path)
    
        def can_add(self, dist):
            """Is distribution `dist` acceptable for this environment?
    
            Returns True only if the distribution matches the platform and
            Python version requirements specified when this environment was
            created; otherwise returns False.
            """
            py_compat = (
                self.python is None
                or dist.py_version is None
                or dist.py_version == self.python
            )
            return py_compat and compatible_platforms(dist.platform, self.platform)
    
        def remove(self, dist):
            """Remove `dist` from the environment"""
            self._distmap[dist.key].remove(dist)
    
        def scan(self, search_path=None):
            """Scan `search_path` for distributions usable in this environment
    
            Any distributions found are added to the environment.
            `search_path` should be a sequence of ``sys.path`` items.  If not
            supplied, ``sys.path`` is used.  Only distributions conforming to
            the platform/python version defined at initialization are added.
            """
            if search_path is None:
                search_path = sys.path
    
            for item in search_path:
                for dist in find_distributions(item):
                    self.add(dist)
    
        def __getitem__(self, project_name):
            """Return a newest-to-oldest list of distributions for `project_name`
    
            Uses case-insensitive `project_name` comparison, assuming all the
            project's distributions use their project's name converted to all
            lowercase as their key.
    
            """
            distribution_key = project_name.lower()
            return self._distmap.get(distribution_key, [])
    
        def add(self, dist):
            """Add `dist` if we ``can_add()`` it and it has not already been added
            """
            if self.can_add(dist) and dist.has_version():
                dists = self._distmap.setdefault(dist.key, [])
                if dist not in dists:
                    dists.append(dist)
                    dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
    
        def best_match(
                self, req, working_set, installer=None, replace_conflicting=False):
            """Find distribution best matching `req` and usable on `working_set`
    
            This calls the ``find(req)`` method of the `working_set` to see if a
            suitable distribution is already active.  (This may raise
            ``VersionConflict`` if an unsuitable version of the project is already
            active in the specified `working_set`.)  If a suitable distribution
            isn't active, this method returns the newest distribution in the
            environment that meets the ``Requirement`` in `req`.  If no suitable
            distribution is found, and `installer` is supplied, then the result of
            calling the environment's ``obtain(req, installer)`` method will be
            returned.
            """
            try:
                dist = working_set.find(req)
            except VersionConflict:
                if not replace_conflicting:
                    raise
                dist = None
            if dist is not None:
                return dist
            for dist in self[req.key]:
                if dist in req:
                    return dist
            # try to download/install
            return self.obtain(req, installer)
    
        def obtain(self, requirement, installer=None):
            """Obtain a distribution matching `requirement` (e.g. via download)
    
            Obtain a distro that matches requirement (e.g. via download).  In the
            base ``Environment`` class, this routine just returns
            ``installer(requirement)``, unless `installer` is None, in which case
            None is returned instead.  This method is a hook that allows subclasses
            to attempt other ways of obtaining a distribution before falling back
            to the `installer` argument."""
            if installer is not None:
                return installer(requirement)
    
        def __iter__(self):
            """Yield the unique project names of the available distributions"""
            for key in self._distmap.keys():
                if self[key]:
                    yield key
    
        def __iadd__(self, other):
            """In-place addition of a distribution or environment"""
            if isinstance(other, Distribution):
                self.add(other)
            elif isinstance(other, Environment):
                for project in other:
                    for dist in other[project]:
                        self.add(dist)
            else:
                raise TypeError("Can't add %r to environment" % (other,))
            return self
    
        def __add__(self, other):
            """Add an environment or distribution to an environment"""
            new = self.__class__([], platform=None, python=None)
            for env in self, other:
                new += env
            return new
    
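    # Illustrative sketch (the plugin directory is hypothetical):
    #
    #   env = Environment(['plugins'])      # snapshot just one directory
    #   for project_name in env:            # unique project keys
    #       newest = env[project_name][0]   # entries are newest-to-oldest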
    
    # XXX backward compatibility
    AvailableDistributions = Environment
    
    
    class ExtractionError(RuntimeError):
        """An error occurred extracting a resource
    
        The following attributes are available from instances of this exception:
    
        manager
            The resource manager that raised this exception
    
        cache_path
            The base directory for resource extraction
    
        original_error
            The exception instance that caused extraction to fail
        """
    
    
    class ResourceManager:
        """Manage resource extraction and packages"""
        extraction_path = None
    
        def __init__(self):
            self.cached_files = {}
    
        def resource_exists(self, package_or_requirement, resource_name):
            """Does the named resource exist?"""
            return get_provider(package_or_requirement).has_resource(resource_name)
    
        def resource_isdir(self, package_or_requirement, resource_name):
            """Is the named resource an existing directory?"""
            return get_provider(package_or_requirement).resource_isdir(
                resource_name
            )
    
        def resource_filename(self, package_or_requirement, resource_name):
            """Return a true filesystem path for specified resource"""
            return get_provider(package_or_requirement).get_resource_filename(
                self, resource_name
            )
    
        def resource_stream(self, package_or_requirement, resource_name):
            """Return a readable file-like object for specified resource"""
            return get_provider(package_or_requirement).get_resource_stream(
                self, resource_name
            )
    
        def resource_string(self, package_or_requirement, resource_name):
            """Return specified resource as a string"""
            return get_provider(package_or_requirement).get_resource_string(
                self, resource_name
            )
    
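        # Illustrative sketch: pkg_resources binds these methods to a
        # module-level singleton, so typical callers use the module functions
        # directly (package and resource names here are hypothetical):
        #
        #   import pkg_resources
        #   data = pkg_resources.resource_string('mypkg', 'data/defaults.cfg')
        #   path = pkg_resources.resource_filename('mypkg', 'data/defaults.cfg')
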
        def resource_listdir(self, package_or_requirement, resource_name):
            """List the contents of the named resource directory"""
            return get_provider(package_or_requirement).resource_listdir(
                resource_name
            )
    
        def extraction_error(self):
            """Give an error message for problems extracting file(s)"""
    
            old_exc = sys.exc_info()[1]
            cache_path = self.extraction_path or get_default_cache()
    
            tmpl = textwrap.dedent("""
                Can't extract file(s) to egg cache
    
                The following error occurred while trying to extract file(s)
                to the Python egg cache:
    
                  {old_exc}
    
                The Python egg cache directory is currently set to:
    
                  {cache_path}
    
                Perhaps your account does not have write access to this directory?
                You can change the cache directory by setting the PYTHON_EGG_CACHE
                environment variable to point to an accessible directory.
                """).lstrip()
            err = ExtractionError(tmpl.format(**locals()))
            err.manager = self
            err.cache_path = cache_path
            err.original_error = old_exc
            raise err
    
        def get_cache_path(self, archive_name, names=()):
            """Return absolute location in cache for `archive_name` and `names`
    
            The parent directory of the resulting path will be created if it does
            not already exist.  `archive_name` should be the base filename of the
            enclosing egg (which may not be the name of the enclosing zipfile!),
            including its ".egg" extension.  `names`, if provided, should be a
            sequence of path name parts "under" the egg's extraction location.
    
            This method should only be called by resource providers that need to
            obtain an extraction location, and only for names they intend to
            extract, as it tracks the generated names for possible cleanup later.
            """
            extract_path = self.extraction_path or get_default_cache()
            target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
            try:
                _bypass_ensure_directory(target_path)
            except Exception:
                self.extraction_error()
    
            self._warn_unsafe_extraction_path(extract_path)
    
            self.cached_files[target_path] = 1
            return target_path
    
        @staticmethod
        def _warn_unsafe_extraction_path(path):
            """
            If the default extraction path is overridden and set to an insecure
            location, such as /tmp, it opens up an opportunity for an attacker to
            replace an extracted file with an unauthorized payload. Warn the user
            if a known insecure location is used.
    
            See Distribute #375 for more details.
            """
            if os.name == 'nt' and not path.startswith(os.environ['windir']):
                # On Windows, permissions are generally restrictive by default
                #  and temp directories are not writable by other users, so
                #  bypass the warning.
                return
            mode = os.stat(path).st_mode
            if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
                msg = (
                    "%s is writable by group/others and vulnerable to attack "
                    "when "
                    "used with get_resource_filename. Consider a more secure "
                    "location (set with .set_extraction_path or the "
                    "PYTHON_EGG_CACHE environment variable)." % path
                )
                warnings.warn(msg, UserWarning)
    
        def postprocess(self, tempname, filename):
            """Perform any platform-specific postprocessing of `tempname`
    
            This is where Mac header rewrites should be done; other platforms don't
            have anything special they should do.
    
            Resource providers should call this method ONLY after successfully
            extracting a compressed resource.  They must NOT call it on resources
            that are already in the filesystem.
    
            `tempname` is the current (temporary) name of the file, and `filename`
            is the name it will be renamed to by the caller after this routine
            returns.
            """
    
            if os.name == 'posix':
                # Make the resource executable
                mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
                os.chmod(tempname, mode)
    
        def set_extraction_path(self, path):
            """Set the base path where resources will be extracted to, if needed.
    
            If you do not call this routine before any extractions take place, the
            path defaults to the return value of ``get_default_cache()``.  (Which
            is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
            platform-specific fallbacks.  See that routine's documentation for more
            details.)
    
            Resources are extracted to subdirectories of this path based upon
            information given by the ``IResourceProvider``.  You may set this to a
            temporary directory, but then you must call ``cleanup_resources()`` to
            delete the extracted files when done.  There is no guarantee that
            ``cleanup_resources()`` will be able to remove all extracted files.
    
            (Note: you may not change the extraction path for a given resource
            manager once resources have been extracted, unless you first call
            ``cleanup_resources()``.)
            """
            if self.cached_files:
                raise ValueError(
                    "Can't change extraction path, files already extracted"
                )
    
            self.extraction_path = path
    
        def cleanup_resources(self, force=False):
            """
            Delete all extracted resource files and directories, returning a list
            of the file and directory names that could not be successfully removed.
            This function does not have any concurrency protection, so it should
            generally only be called when the extraction path is a temporary
            directory exclusive to a single process.  This method is not
            automatically called; you must call it explicitly or register it as an
            ``atexit`` function if you wish to ensure cleanup of a temporary
            directory used for extractions.
            """
            # XXX
    
    
    def get_default_cache():
        """
        Return the ``PYTHON_EGG_CACHE`` environment variable
        or a platform-relevant user cache dir for an app
        named "Python-Eggs".
        """
        return (
            os.environ.get('PYTHON_EGG_CACHE')
            or appdirs.user_cache_dir(appname='Python-Eggs')
        )
    
    
    def safe_name(name):
        """Convert an arbitrary string to a standard distribution name
    
        Any runs of non-alphanumeric/. characters are replaced with a single '-'.
        """
        return re.sub('[^A-Za-z0-9.]+', '-', name)
    
    
    def safe_version(version):
        """
        Convert an arbitrary string to a standard version string
        """
        try:
            # normalize the version
            return str(packaging.version.Version(version))
        except packaging.version.InvalidVersion:
            version = version.replace(' ', '.')
            return re.sub('[^A-Za-z0-9.]+', '-', version)
    
    
    def safe_extra(extra):
        """Convert an arbitrary string to a standard 'extra' name
    
        Any runs of non-alphanumeric characters are replaced with a single '_',
        and the result is always lowercased.
        """
        return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
    
    
    def to_filename(name):
        """Convert a project or version name to its filename-escaped form
    
        Any '-' characters are currently replaced with '_'.
        """
        return name.replace('-', '_')
    
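    # Doctest-style illustrations of the escaping helpers above:
    #
    #   safe_name('my.project_name!')   -> 'my.project-name-'
    #   safe_version('2.1-rc1')         -> '2.1rc1'        (normalized)
    #   safe_version('CVS snapshot')    -> 'CVS.snapshot'  (fallback escaping)
    #   safe_extra('Feature One')       -> 'feature_one'
    #   to_filename('my-project')       -> 'my_project'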
    
    def invalid_marker(text):
        """
        Validate text as a PEP 508 environment marker; return an exception
        instance if `text` is invalid, or False otherwise.
        """
        try:
            evaluate_marker(text)
        except SyntaxError as e:
            e.filename = None
            e.lineno = None
            return e
        return False
    
    
    def evaluate_marker(text, extra=None):
        """
        Evaluate a PEP 508 environment marker.
        Return a boolean indicating the marker result in this environment.
        Raise SyntaxError if marker is invalid.
    
        This implementation delegates to the ``packaging.markers`` parser.
        """
        try:
            marker = packaging.markers.Marker(text)
            return marker.evaluate()
        except packaging.markers.InvalidMarker as e:
            raise SyntaxError(e)
    
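    # Illustrative calls (results depend on the running interpreter):
    #
    #   evaluate_marker('python_version >= "3"')   -> True on Python 3
    #   invalid_marker('os_name == "posix"')       -> False (marker is valid)
    #   invalid_marker('not a marker')             -> a SyntaxError instance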
    
    class NullProvider:
        """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    
        egg_name = None
        egg_info = None
        loader = None
    
        def __init__(self, module):
            self.loader = getattr(module, '__loader__', None)
            self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    
        def get_resource_filename(self, manager, resource_name):
            return self._fn(self.module_path, resource_name)
    
        def get_resource_stream(self, manager, resource_name):
            return io.BytesIO(self.get_resource_string(manager, resource_name))
    
        def get_resource_string(self, manager, resource_name):
            return self._get(self._fn(self.module_path, resource_name))
    
        def has_resource(self, resource_name):
            return self._has(self._fn(self.module_path, resource_name))
    
        def has_metadata(self, name):
            return self.egg_info and self._has(self._fn(self.egg_info, name))
    
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            value = self._get(self._fn(self.egg_info, name))
            return value.decode('utf-8') if six.PY3 else value
    
        def get_metadata_lines(self, name):
            return yield_lines(self.get_metadata(name))
    
        def resource_isdir(self, resource_name):
            return self._isdir(self._fn(self.module_path, resource_name))
    
        def metadata_isdir(self, name):
            return self.egg_info and self._isdir(self._fn(self.egg_info, name))
    
        def resource_listdir(self, resource_name):
            return self._listdir(self._fn(self.module_path, resource_name))
    
        def metadata_listdir(self, name):
            if self.egg_info:
                return self._listdir(self._fn(self.egg_info, name))
            return []
    
        def run_script(self, script_name, namespace):
            script = 'scripts/' + script_name
            if not self.has_metadata(script):
                raise ResolutionError(
                    "Script {script!r} not found in metadata at {self.egg_info!r}"
                    .format(**locals()),
                )
            script_text = self.get_metadata(script).replace('\r\n', '\n')
            script_text = script_text.replace('\r', '\n')
            script_filename = self._fn(self.egg_info, script)
            namespace['__file__'] = script_filename
            if os.path.exists(script_filename):
                source = open(script_filename).read()
                code = compile(source, script_filename, 'exec')
                exec(code, namespace, namespace)
            else:
                from linecache import cache
                cache[script_filename] = (
                    len(script_text), 0, script_text.split('\n'), script_filename
                )
                script_code = compile(script_text, script_filename, 'exec')
                exec(script_code, namespace, namespace)
    
        def _has(self, path):
            raise NotImplementedError(
                "Can't perform this operation for unregistered loader type"
            )
    
        def _isdir(self, path):
            raise NotImplementedError(
                "Can't perform this operation for unregistered loader type"
            )
    
        def _listdir(self, path):
            raise NotImplementedError(
                "Can't perform this operation for unregistered loader type"
            )
    
        def _fn(self, base, resource_name):
            if resource_name:
                return os.path.join(base, *resource_name.split('/'))
            return base
    
        def _get(self, path):
            if hasattr(self.loader, 'get_data'):
                return self.loader.get_data(path)
            raise NotImplementedError(
                "Can't perform this operation for loaders without 'get_data()'"
            )
    
    
    register_loader_type(object, NullProvider)
    
    
    class EggProvider(NullProvider):
        """Provider based on a virtual filesystem"""
    
        def __init__(self, module):
            NullProvider.__init__(self, module)
            self._setup_prefix()
    
        def _setup_prefix(self):
            # we assume here that our metadata may be nested inside a "basket"
            # of multiple eggs; that's why we use module_path instead of .archive
            path = self.module_path
            old = None
            while path != old:
                if _is_egg_path(path):
                    self.egg_name = os.path.basename(path)
                    self.egg_info = os.path.join(path, 'EGG-INFO')
                    self.egg_root = path
                    break
                old = path
                path, base = os.path.split(path)
    
    
    class DefaultProvider(EggProvider):
        """Provides access to package resources in the filesystem"""
    
        def _has(self, path):
            return os.path.exists(path)
    
        def _isdir(self, path):
            return os.path.isdir(path)
    
        def _listdir(self, path):
            return os.listdir(path)
    
        def get_resource_stream(self, manager, resource_name):
            return open(self._fn(self.module_path, resource_name), 'rb')
    
        def _get(self, path):
            with open(path, 'rb') as stream:
                return stream.read()
    
        @classmethod
        def _register(cls):
            loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
            for name in loader_names:
                loader_cls = getattr(importlib_machinery, name, type(None))
                register_loader_type(loader_cls, cls)
    
    
    DefaultProvider._register()
    
    
    class EmptyProvider(NullProvider):
        """Provider that returns nothing for all requests"""
    
        module_path = None
    
        _isdir = _has = lambda self, path: False
    
        def _get(self, path):
            return ''
    
        def _listdir(self, path):
            return []
    
        def __init__(self):
            pass
    
    
    empty_provider = EmptyProvider()
    
    
    class ZipManifests(dict):
        """
        zip manifest builder
        """
    
        @classmethod
        def build(cls, path):
            """
            Build a dictionary similar to the zipimport directory
            caches, except instead of tuples, store ZipInfo objects.
    
            Use a platform-specific path separator (os.sep) for the path keys
            for compatibility with pypy on Windows.
            """
            with zipfile.ZipFile(path) as zfile:
                items = (
                    (
                        name.replace('/', os.sep),
                        zfile.getinfo(name),
                    )
                    for name in zfile.namelist()
                )
                return dict(items)
    
        load = build
    
    
    class MemoizedZipManifests(ZipManifests):
        """
        Memoized zipfile manifests.
        """
        manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
    
        def load(self, path):
            """
            Load a manifest at path or return a suitable manifest already loaded.
            """
            path = os.path.normpath(path)
            mtime = os.stat(path).st_mtime
    
            if path not in self or self[path].mtime != mtime:
                manifest = self.build(path)
                self[path] = self.manifest_mod(manifest, mtime)
    
            return self[path].manifest
    
    
    class ZipProvider(EggProvider):
        """Resource support for zips and eggs"""
    
        eagers = None
        _zip_manifests = MemoizedZipManifests()
    
        def __init__(self, module):
            EggProvider.__init__(self, module)
            self.zip_pre = self.loader.archive + os.sep
    
        def _zipinfo_name(self, fspath):
            # Convert a virtual filename (full path to file) into a zipfile subpath
            # usable with the zipimport directory cache for our target archive
            fspath = fspath.rstrip(os.sep)
            if fspath == self.loader.archive:
                return ''
            if fspath.startswith(self.zip_pre):
                return fspath[len(self.zip_pre):]
            raise AssertionError(
                "%s is not a subpath of %s" % (fspath, self.zip_pre)
            )
    
        def _parts(self, zip_path):
            # Convert a zipfile subpath into an egg-relative path part list.
            # pseudo-fs path
            fspath = self.zip_pre + zip_path
            if fspath.startswith(self.egg_root + os.sep):
                return fspath[len(self.egg_root) + 1:].split(os.sep)
            raise AssertionError(
                "%s is not a subpath of %s" % (fspath, self.egg_root)
            )
    
        @property
        def zipinfo(self):
            return self._zip_manifests.load(self.loader.archive)
    
        def get_resource_filename(self, manager, resource_name):
            if not self.egg_name:
                raise NotImplementedError(
                    "resource_filename() only supported for .egg, not .zip"
                )
            # no need to lock for extraction, since we use temp names
            zip_path = self._resource_to_zip(resource_name)
            eagers = self._get_eager_resources()
            if '/'.join(self._parts(zip_path)) in eagers:
                for name in eagers:
                    self._extract_resource(manager, self._eager_to_zip(name))
            return self._extract_resource(manager, zip_path)
    
        @staticmethod
        def _get_date_and_size(zip_stat):
            size = zip_stat.file_size
            # ymdhms+wday, yday, dst
            date_time = zip_stat.date_time + (0, 0, -1)
            # 1980 offset already done
            timestamp = time.mktime(date_time)
            return timestamp, size
    
        def _extract_resource(self, manager, zip_path):
    
            if zip_path in self._index():
                for name in self._index()[zip_path]:
                    last = self._extract_resource(
                        manager, os.path.join(zip_path, name)
                    )
                # return the extracted directory name
                return os.path.dirname(last)
    
            timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
    
            if not WRITE_SUPPORT:
                raise IOError('"os.rename" and "os.unlink" are not supported '
                              'on this platform')
            try:
    
                real_path = manager.get_cache_path(
                    self.egg_name, self._parts(zip_path)
                )
    
                if self._is_current(real_path, zip_path):
                    return real_path
    
                outf, tmpnam = _mkstemp(
                    ".$extract",
                    dir=os.path.dirname(real_path),
                )
                os.write(outf, self.loader.get_data(zip_path))
                os.close(outf)
                utime(tmpnam, (timestamp, timestamp))
                manager.postprocess(tmpnam, real_path)
    
                try:
                    rename(tmpnam, real_path)
    
                except os.error:
                    if os.path.isfile(real_path):
                        if self._is_current(real_path, zip_path):
                            # the file became current since it was checked above,
                            #  so proceed.
                            return real_path
                        # Windows, del old file and retry
                        elif os.name == 'nt':
                            unlink(real_path)
                            rename(tmpnam, real_path)
                            return real_path
                    raise
    
            except os.error:
                # report a user-friendly error
                manager.extraction_error()
    
            return real_path
    
        def _is_current(self, file_path, zip_path):
            """
            Return True if the file_path is current for this zip_path
            """
            timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
            if not os.path.isfile(file_path):
                return False
            stat = os.stat(file_path)
            if stat.st_size != size or stat.st_mtime != timestamp:
                return False
            # check that the contents match
            zip_contents = self.loader.get_data(zip_path)
            with open(file_path, 'rb') as f:
                file_contents = f.read()
            return zip_contents == file_contents
    
        def _get_eager_resources(self):
            if self.eagers is None:
                eagers = []
                for name in ('native_libs.txt', 'eager_resources.txt'):
                    if self.has_metadata(name):
                        eagers.extend(self.get_metadata_lines(name))
                self.eagers = eagers
            return self.eagers
    
        def _index(self):
            try:
                return self._dirindex
            except AttributeError:
                ind = {}
                for path in self.zipinfo:
                    parts = path.split(os.sep)
                    while parts:
                        parent = os.sep.join(parts[:-1])
                        if parent in ind:
                            ind[parent].append(parts[-1])
                            break
                        else:
                            ind[parent] = [parts.pop()]
                self._dirindex = ind
                return ind
    
        def _has(self, fspath):
            zip_path = self._zipinfo_name(fspath)
            return zip_path in self.zipinfo or zip_path in self._index()
    
        def _isdir(self, fspath):
            return self._zipinfo_name(fspath) in self._index()
    
        def _listdir(self, fspath):
            return list(self._index().get(self._zipinfo_name(fspath), ()))
    
        def _eager_to_zip(self, resource_name):
            return self._zipinfo_name(self._fn(self.egg_root, resource_name))
    
        def _resource_to_zip(self, resource_name):
            return self._zipinfo_name(self._fn(self.module_path, resource_name))
    
    
    register_loader_type(zipimport.zipimporter, ZipProvider)
    
    
    class FileMetadata(EmptyProvider):
        """Metadata handler for standalone PKG-INFO files
    
        Usage::
    
            metadata = FileMetadata("/path/to/PKG-INFO")
    
        This provider rejects all data and metadata requests except for PKG-INFO,
        which is treated as existing, and will be the contents of the file at
        the provided location.
        """
    
        def __init__(self, path):
            self.path = path
    
        def has_metadata(self, name):
            return name == 'PKG-INFO' and os.path.isfile(self.path)
    
        def get_metadata(self, name):
            if name != 'PKG-INFO':
                raise KeyError("No metadata except PKG-INFO is available")
    
            with io.open(self.path, encoding='utf-8', errors="replace") as f:
                metadata = f.read()
            self._warn_on_replacement(metadata)
            return metadata
    
        def _warn_on_replacement(self, metadata):
            # Python 2.7 compat for: replacement_char = '�'
            replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
            if replacement_char in metadata:
                tmpl = "{self.path} could not be properly decoded in UTF-8"
                msg = tmpl.format(**locals())
                warnings.warn(msg)
    
        def get_metadata_lines(self, name):
            return yield_lines(self.get_metadata(name))
    
    
    class PathMetadata(DefaultProvider):
        """Metadata provider for egg directories
    
        Usage::
    
            # Development eggs:
    
            egg_info = "/path/to/PackageName.egg-info"
            base_dir = os.path.dirname(egg_info)
            metadata = PathMetadata(base_dir, egg_info)
            dist_name = os.path.splitext(os.path.basename(egg_info))[0]
            dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
    
            # Unpacked egg directories:
    
            egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
            metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
            dist = Distribution.from_filename(egg_path, metadata=metadata)
        """
    
        def __init__(self, path, egg_info):
            self.module_path = path
            self.egg_info = egg_info
    
    
    class EggMetadata(ZipProvider):
        """Metadata provider for .egg files"""
    
        def __init__(self, importer):
            """Create a metadata provider from a zipimporter"""
    
            self.zip_pre = importer.archive + os.sep
            self.loader = importer
            if importer.prefix:
                self.module_path = os.path.join(importer.archive, importer.prefix)
            else:
                self.module_path = importer.archive
            self._setup_prefix()
    
    
    _declare_state('dict', _distribution_finders={})
    
    
    def register_finder(importer_type, distribution_finder):
        """Register `distribution_finder` to find distributions in sys.path items
    
        `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
        handler), and `distribution_finder` is a callable that, passed a path
        item and the importer instance, yields ``Distribution`` instances found on
        that path item.  See ``pkg_resources.find_on_path`` for an example."""
        _distribution_finders[importer_type] = distribution_finder
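
    # Illustrative sketch (not from the original source) of the finder shape
    # this registry expects.  ``_DemoImporter`` and ``_demo_finder`` are
    # hypothetical names used only for this example; compare the real
    # ``find_eggs_in_zip`` and ``find_on_path`` below.
    class _DemoImporter(object):
        """Hypothetical PEP 302 sys.path item handler."""

    def _demo_finder(importer, path_item, only=False):
        # A finder yields Distribution instances found on `path_item`;
        # this demo yields nothing.
        return iter(())

    # Wiring it up would look like:
    #     register_finder(_DemoImporter, _demo_finder)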
    
    
    def find_distributions(path_item, only=False):
        """Yield distributions accessible via `path_item`"""
        importer = get_importer(path_item)
        finder = _find_adapter(_distribution_finders, importer)
        return finder(importer, path_item, only)
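
    # Usage sketch (illustrative; the directory is an assumption): list what
    # pkg_resources can discover on a single sys.path entry.
    def _example_find_distributions():
        for dist in find_distributions('/usr/lib/python3.6/site-packages'):
            print(dist)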
    
    
    def find_eggs_in_zip(importer, path_item, only=False):
        """
        Find eggs in zip files; possibly multiple nested eggs.
        """
        if importer.archive.endswith('.whl'):
            # wheels are not supported with this finder
            # they don't have PKG-INFO metadata, and won't ever contain eggs
            return
        metadata = EggMetadata(importer)
        if metadata.has_metadata('PKG-INFO'):
            yield Distribution.from_filename(path_item, metadata=metadata)
        if only:
            # don't yield nested distros
            return
        for subitem in metadata.resource_listdir('/'):
            if _is_egg_path(subitem):
                subpath = os.path.join(path_item, subitem)
                dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
                for dist in dists:
                    yield dist
            elif subitem.lower().endswith('.dist-info'):
                subpath = os.path.join(path_item, subitem)
                submeta = EggMetadata(zipimport.zipimporter(subpath))
                submeta.egg_info = subpath
                yield Distribution.from_location(path_item, subitem, submeta)
    
    
    register_finder(zipimport.zipimporter, find_eggs_in_zip)
    
    
    def find_nothing(importer, path_item, only=False):
        return ()
    
    
    register_finder(object, find_nothing)
    
    
    def _by_version_descending(names):
        """
        Given a list of filenames, return them in descending order
        by version number.
    
        >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
        >>> _by_version_descending(names)
        ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
        >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
        >>> _by_version_descending(names)
        ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
        >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
        >>> _by_version_descending(names)
        ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
        """
        def _by_version(name):
            """
            Parse each component of the filename
            """
            name, ext = os.path.splitext(name)
            parts = itertools.chain(name.split('-'), [ext])
            return [packaging.version.parse(part) for part in parts]
    
        return sorted(names, key=_by_version, reverse=True)
    
    
    def find_on_path(importer, path_item, only=False):
        """Yield distributions accessible on a sys.path directory"""
        path_item = _normalize_cached(path_item)
    
        if _is_unpacked_egg(path_item):
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item, 'EGG-INFO')
                )
            )
            return
    
        entries = safe_listdir(path_item)
    
        # for performance, before sorting by version,
        # screen entries for only those that will yield
        # distributions
        filtered = (
            entry
            for entry in entries
            if dist_factory(path_item, entry, only)
        )
    
        # scan for .egg and .egg-info in directory
        path_item_entries = _by_version_descending(filtered)
        for entry in path_item_entries:
            fullpath = os.path.join(path_item, entry)
            factory = dist_factory(path_item, entry, only)
            for dist in factory(fullpath):
                yield dist
    
    
    def dist_factory(path_item, entry, only):
        """
        Return a dist_factory for a path_item and entry
        """
        lower = entry.lower()
        is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
        return (
            distributions_from_metadata
            if is_meta else
            find_distributions
            if not only and _is_egg_path(entry) else
            resolve_egg_link
            if not only and lower.endswith('.egg-link') else
            NoDists()
        )
    
    
    class NoDists:
        """
        >>> bool(NoDists())
        False
    
        >>> list(NoDists()('anything'))
        []
        """
        def __bool__(self):
            return False
        if six.PY2:
            __nonzero__ = __bool__
    
        def __call__(self, fullpath):
            return iter(())
    
    
    def safe_listdir(path):
        """
        Attempt to list contents of path, but suppress some exceptions.
        """
        try:
            return os.listdir(path)
        except (PermissionError, NotADirectoryError):
            pass
        except OSError as e:
            # Ignore the directory if does not exist, not a directory or
            # permission denied
            ignorable = (
                e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
                # Python 2 on Windows needs to be handled this way :(
                or getattr(e, "winerror", None) == 267
            )
            if not ignorable:
                raise
        return ()
    
    
    def distributions_from_metadata(path):
        root = os.path.dirname(path)
        if os.path.isdir(path):
            if len(os.listdir(path)) == 0:
                # empty metadata dir; skip
                return
            metadata = PathMetadata(root, path)
        else:
            metadata = FileMetadata(path)
        entry = os.path.basename(path)
        yield Distribution.from_location(
            root, entry, metadata, precedence=DEVELOP_DIST,
        )
    
    
    def non_empty_lines(path):
        """
        Yield non-empty lines from file at path
        """
        with open(path) as f:
            for line in f:
                line = line.strip()
                if line:
                    yield line
    
    
    def resolve_egg_link(path):
        """
        Given a path to an .egg-link, resolve distributions
        present in the referenced path.
        """
        referenced_paths = non_empty_lines(path)
        resolved_paths = (
            os.path.join(os.path.dirname(path), ref)
            for ref in referenced_paths
        )
        dist_groups = map(find_distributions, resolved_paths)
        return next(dist_groups, ())
    
    
    register_finder(pkgutil.ImpImporter, find_on_path)
    
    if hasattr(importlib_machinery, 'FileFinder'):
        register_finder(importlib_machinery.FileFinder, find_on_path)
    
    _declare_state('dict', _namespace_handlers={})
    _declare_state('dict', _namespace_packages={})
    
    
    def register_namespace_handler(importer_type, namespace_handler):
        """Register `namespace_handler` to declare namespace packages
    
        `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
        handler), and `namespace_handler` is a callable like this::
    
            def namespace_handler(importer, path_entry, moduleName, module):
                # return a path_entry to use for child packages
    
        Namespace handlers are only called if the importer object has already
        agreed that it can handle the relevant path item, and they should only
        return a subpath if the module __path__ does not already contain an
        equivalent subpath.  For an example namespace handler, see
        ``pkg_resources.file_ns_handler``.
        """
        _namespace_handlers[importer_type] = namespace_handler
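
    # Illustrative sketch (not part of the original source): a namespace
    # handler with the documented signature.  ``_DemoNsImporter`` is
    # hypothetical; ``null_ns_handler`` further below is the real minimal case.
    class _DemoNsImporter(object):
        """Hypothetical importer type for this example."""

    def _demo_ns_handler(importer, path_entry, moduleName, module):
        # Returning None means "no subpath to add for this path item".
        return None

    # register_namespace_handler(_DemoNsImporter, _demo_ns_handler)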
    
    
    def _handle_ns(packageName, path_item):
        """Ensure that named package includes a subpath of path_item (if needed)"""
    
        importer = get_importer(path_item)
        if importer is None:
            return None
        loader = importer.find_module(packageName)
        if loader is None:
            return None
        module = sys.modules.get(packageName)
        if module is None:
            module = sys.modules[packageName] = types.ModuleType(packageName)
            module.__path__ = []
            _set_parent_ns(packageName)
        elif not hasattr(module, '__path__'):
            raise TypeError("Not a package:", packageName)
        handler = _find_adapter(_namespace_handlers, importer)
        subpath = handler(importer, path_item, packageName, module)
        if subpath is not None:
            path = module.__path__
            path.append(subpath)
            loader.load_module(packageName)
            _rebuild_mod_path(path, packageName, module)
        return subpath
    
    
    def _rebuild_mod_path(orig_path, package_name, module):
        """
        Rebuild module.__path__ ensuring that all entries are ordered
        corresponding to their sys.path order
        """
        sys_path = [_normalize_cached(p) for p in sys.path]
    
        def safe_sys_path_index(entry):
            """
            Workaround for #520 and #513.
            """
            try:
                return sys_path.index(entry)
            except ValueError:
                return float('inf')
    
        def position_in_sys_path(path):
            """
            Return the ordinal of the path based on its position in sys.path
            """
            path_parts = path.split(os.sep)
            module_parts = package_name.count('.') + 1
            parts = path_parts[:-module_parts]
            return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
    
        if not isinstance(orig_path, list):
            # Is this behavior useful when module.__path__ is not a list?
            return
    
        orig_path.sort(key=position_in_sys_path)
        module.__path__[:] = [_normalize_cached(p) for p in orig_path]
    
    
    def declare_namespace(packageName):
        """Declare that package 'packageName' is a namespace package"""
    
        _imp.acquire_lock()
        try:
            if packageName in _namespace_packages:
                return
    
            path, parent = sys.path, None
            if '.' in packageName:
                parent = '.'.join(packageName.split('.')[:-1])
                declare_namespace(parent)
                if parent not in _namespace_packages:
                    __import__(parent)
                try:
                    path = sys.modules[parent].__path__
                except AttributeError:
                    raise TypeError("Not a package:", parent)
    
            # Track what packages are namespaces, so when new path items are added,
            # they can be updated
            _namespace_packages.setdefault(parent, []).append(packageName)
            _namespace_packages.setdefault(packageName, [])
    
            for path_item in path:
                # Ensure all the parent's path items are reflected in the child,
                # if they apply
                _handle_ns(packageName, path_item)
    
        finally:
            _imp.release_lock()
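
    # Usage sketch (illustrative): historically a namespace package's
    # __init__.py contained just these two lines, letting multiple
    # distributions contribute subpackages to one top-level name:
    #
    #     import pkg_resources
    #     pkg_resources.declare_namespace(__name__)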
    
    
    def fixup_namespace_packages(path_item, parent=None):
        """Ensure that previously-declared namespace packages include path_item"""
        _imp.acquire_lock()
        try:
            for package in _namespace_packages.get(parent, ()):
                subpath = _handle_ns(package, path_item)
                if subpath:
                    fixup_namespace_packages(subpath, package)
        finally:
            _imp.release_lock()
    
    
    def file_ns_handler(importer, path_item, packageName, module):
        """Compute an ns-package subpath for a filesystem or zipfile importer"""
    
        subpath = os.path.join(path_item, packageName.split('.')[-1])
        normalized = _normalize_cached(subpath)
        for item in module.__path__:
            if _normalize_cached(item) == normalized:
                break
        else:
            # Only return the path if it's not already there
            return subpath
    
    
    register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
    register_namespace_handler(zipimport.zipimporter, file_ns_handler)
    
    if hasattr(importlib_machinery, 'FileFinder'):
        register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
    
    
    def null_ns_handler(importer, path_item, packageName, module):
        return None
    
    
    register_namespace_handler(object, null_ns_handler)
    
    
    def normalize_path(filename):
        """Normalize a file/dir name for comparison purposes"""
        return os.path.normcase(os.path.realpath(filename))
    
    
    def _normalize_cached(filename, _cache={}):
        try:
            return _cache[filename]
        except KeyError:
            _cache[filename] = result = normalize_path(filename)
            return result
    
    
    def _is_egg_path(path):
        """
        Determine if given path appears to be an egg.
        """
        return path.lower().endswith('.egg')
    
    
    def _is_unpacked_egg(path):
        """
        Determine if given path appears to be an unpacked egg.
        """
        return (
            _is_egg_path(path) and
            os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
        )
    
    
    def _set_parent_ns(packageName):
        parts = packageName.split('.')
        name = parts.pop()
        if parts:
            parent = '.'.join(parts)
            setattr(sys.modules[parent], name, sys.modules[packageName])
    
    
    def yield_lines(strs):
        """Yield non-empty/non-comment lines of a string or sequence"""
        if isinstance(strs, six.string_types):
            for s in strs.splitlines():
                s = s.strip()
                # skip blank lines/comments
                if s and not s.startswith('#'):
                    yield s
        else:
            for ss in strs:
                for s in yield_lines(ss):
                    yield s
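
    # Example (illustrative): yield_lines flattens nested iterables of text
    # and skips blanks and comments, so both calls produce ['a', 'b']:
    #
    #     list(yield_lines("a\n# comment\n\nb"))
    #     list(yield_lines(["a", ["", "# comment", "b"]]))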
    
    
    MODULE = re.compile(r"\w+(\.\w+)*$").match
    EGG_NAME = re.compile(
        r"""
        (?P<name>[^-]+) (
            -(?P<ver>[^-]+) (
                -py(?P<pyver>[^-]+) (
                    -(?P<plat>.+)
                )?
            )?
        )?
        """,
        re.VERBOSE | re.IGNORECASE,
    ).match
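
    # Example (illustrative): what EGG_NAME extracts from a typical egg
    # basename (the extension is stripped by the caller first):
    #
    #     m = EGG_NAME('FooBar-1.2-py2.7-linux_x86_64')
    #     m.group('name', 'ver', 'pyver', 'plat')
    #     # -> ('FooBar', '1.2', '2.7', 'linux_x86_64')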
    
    
    class EntryPoint(object):
        """Object representing an advertised importable object"""
    
        def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
            if not MODULE(module_name):
                raise ValueError("Invalid module name", module_name)
            self.name = name
            self.module_name = module_name
            self.attrs = tuple(attrs)
            self.extras = tuple(extras)
            self.dist = dist
    
        def __str__(self):
            s = "%s = %s" % (self.name, self.module_name)
            if self.attrs:
                s += ':' + '.'.join(self.attrs)
            if self.extras:
                s += ' [%s]' % ','.join(self.extras)
            return s
    
        def __repr__(self):
            return "EntryPoint.parse(%r)" % str(self)
    
        def load(self, require=True, *args, **kwargs):
            """
            Require packages for this EntryPoint, then resolve it.
            """
            if not require or args or kwargs:
                warnings.warn(
                    "Parameters to load are deprecated.  Call .resolve and "
                    ".require separately.",
                    DeprecationWarning,
                    stacklevel=2,
                )
            if require:
                self.require(*args, **kwargs)
            return self.resolve()
    
        def resolve(self):
            """
            Resolve the entry point from its module and attrs.
            """
            module = __import__(self.module_name, fromlist=['__name__'], level=0)
            try:
                return functools.reduce(getattr, self.attrs, module)
            except AttributeError as exc:
                raise ImportError(str(exc))
    
        def require(self, env=None, installer=None):
            if self.extras and not self.dist:
                raise UnknownExtra("Can't require() without a distribution", self)
    
            # Get the requirements for this entry point with all its extras and
            # then resolve them. We have to pass `extras` along when resolving so
            # that the working set knows what extras we want. Otherwise, for
            # dist-info distributions, the working set will assume that the
            # requirements for that extra are purely optional and skip over them.
            reqs = self.dist.requires(self.extras)
            items = working_set.resolve(reqs, env, installer, extras=self.extras)
            list(map(working_set.add, items))
    
        pattern = re.compile(
            r'\s*'
            r'(?P<name>.+?)\s*'
            r'=\s*'
            r'(?P<module>[\w.]+)\s*'
            r'(:\s*(?P<attr>[\w.]+))?\s*'
            r'(?P<extras>\[.*\])?\s*$'
        )
    
        @classmethod
        def parse(cls, src, dist=None):
            """Parse a single entry point from string `src`
    
            Entry point syntax follows the form::
    
                name = some.module:some.attr [extra1, extra2]
    
            The entry name and module name are required, but the ``:attrs`` and
            ``[extras]`` parts are optional
            """
            m = cls.pattern.match(src)
            if not m:
                msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
                raise ValueError(msg, src)
            res = m.groupdict()
            extras = cls._parse_extras(res['extras'])
            attrs = res['attr'].split('.') if res['attr'] else ()
            return cls(res['name'], res['module'], attrs, extras, dist)
    
        @classmethod
        def _parse_extras(cls, extras_spec):
            if not extras_spec:
                return ()
            req = Requirement.parse('x' + extras_spec)
            if req.specs:
                raise ValueError()
            return req.extras
    
        @classmethod
        def parse_group(cls, group, lines, dist=None):
            """Parse an entry point group"""
            if not MODULE(group):
                raise ValueError("Invalid group name", group)
            this = {}
            for line in yield_lines(lines):
                ep = cls.parse(line, dist)
                if ep.name in this:
                    raise ValueError("Duplicate entry point", group, ep.name)
                this[ep.name] = ep
            return this
    
        @classmethod
        def parse_map(cls, data, dist=None):
            """Parse a map of entry point groups"""
            if isinstance(data, dict):
                data = data.items()
            else:
                data = split_sections(data)
            maps = {}
            for group, lines in data:
                if group is None:
                    if not lines:
                        continue
                    raise ValueError("Entry points must be listed in groups")
                group = group.strip()
                if group in maps:
                    raise ValueError("Duplicate group name", group)
                maps[group] = cls.parse_group(group, lines, dist)
            return maps
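
    # Usage sketch (illustrative; all names are hypothetical): parsing the
    # 'name = module:attrs [extras]' syntax documented in EntryPoint.parse.
    def _example_entry_points():
        ep = EntryPoint.parse('mycmd = mypkg.cli:main [extra1]')
        # -> name='mycmd', module_name='mypkg.cli',
        #    attrs=('main',), extras=('extra1',)
        group = EntryPoint.parse_group('console_scripts', [
            'mycmd = mypkg.cli:main',
        ])
        return ep, group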
    
    
    def _remove_md5_fragment(location):
        if not location:
            return ''
        parsed = urllib.parse.urlparse(location)
        if parsed[-1].startswith('md5='):
            return urllib.parse.urlunparse(parsed[:-1] + ('',))
        return location
    
    
    def _version_from_file(lines):
        """
        Given an iterable of lines from a Metadata file, return
        the value of the Version field, if present, or None otherwise.
        """
        def is_version_line(line):
            return line.lower().startswith('version:')
        version_lines = filter(is_version_line, lines)
        line = next(iter(version_lines), '')
        _, _, value = line.partition(':')
        return safe_version(value.strip()) or None
    
    
    class Distribution(object):
        """Wrap an actual or potential sys.path entry w/metadata"""
        PKG_INFO = 'PKG-INFO'
    
        def __init__(
                self, location=None, metadata=None, project_name=None,
                version=None, py_version=PY_MAJOR, platform=None,
                precedence=EGG_DIST):
            self.project_name = safe_name(project_name or 'Unknown')
            if version is not None:
                self._version = safe_version(version)
            self.py_version = py_version
            self.platform = platform
            self.location = location
            self.precedence = precedence
            self._provider = metadata or empty_provider
    
        @classmethod
        def from_location(cls, location, basename, metadata=None, **kw):
            project_name, version, py_version, platform = [None] * 4
            basename, ext = os.path.splitext(basename)
            if ext.lower() in _distributionImpl:
                cls = _distributionImpl[ext.lower()]
    
                match = EGG_NAME(basename)
                if match:
                    project_name, version, py_version, platform = match.group(
                        'name', 'ver', 'pyver', 'plat'
                    )
            return cls(
                location, metadata, project_name=project_name, version=version,
                py_version=py_version, platform=platform, **kw
            )._reload_version()
    
        def _reload_version(self):
            return self
    
        @property
        def hashcmp(self):
            return (
                self.parsed_version,
                self.precedence,
                self.key,
                _remove_md5_fragment(self.location),
                self.py_version or '',
                self.platform or '',
            )
    
        def __hash__(self):
            return hash(self.hashcmp)
    
        def __lt__(self, other):
            return self.hashcmp < other.hashcmp
    
        def __le__(self, other):
            return self.hashcmp <= other.hashcmp
    
        def __gt__(self, other):
            return self.hashcmp > other.hashcmp
    
        def __ge__(self, other):
            return self.hashcmp >= other.hashcmp
    
        def __eq__(self, other):
            if not isinstance(other, self.__class__):
                # It's not a Distribution, so they are not equal
                return False
            return self.hashcmp == other.hashcmp
    
        def __ne__(self, other):
            return not self == other
    
        # These properties have to be lazy so that we don't have to load any
        # metadata until/unless it's actually needed.  (i.e., some distributions
        # may not know their name or version without loading PKG-INFO)
    
        @property
        def key(self):
            try:
                return self._key
            except AttributeError:
                self._key = key = self.project_name.lower()
                return key
    
        @property
        def parsed_version(self):
            if not hasattr(self, "_parsed_version"):
                self._parsed_version = parse_version(self.version)
    
            return self._parsed_version
    
        def _warn_legacy_version(self):
            LV = packaging.version.LegacyVersion
            is_legacy = isinstance(self._parsed_version, LV)
            if not is_legacy:
                return
    
            # While an empty version is technically a legacy version and
            # is not a valid PEP 440 version, it's also unlikely to
            # actually come from someone and instead it is more likely that
            # it comes from setuptools attempting to parse a filename and
            # including it in the list. So for that we'll gate this warning
            # on if the version is anything at all or not.
            if not self.version:
                return
    
            tmpl = textwrap.dedent("""
                '{project_name} ({version})' is being parsed as a legacy,
                non PEP 440,
                version. You may find odd behavior and sort order.
                In particular it will be sorted as less than 0.0. It
                is recommended to migrate to PEP 440 compatible
                versions.
                """).strip().replace('\n', ' ')
    
            warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
    
        @property
        def version(self):
            try:
                return self._version
            except AttributeError:
                version = _version_from_file(self._get_metadata(self.PKG_INFO))
                if version is None:
                    tmpl = "Missing 'Version:' header and/or %s file"
                    raise ValueError(tmpl % self.PKG_INFO, self)
                return version
    
        @property
        def _dep_map(self):
            """
            A map of extra to its list of (direct) requirements
            for this distribution, including the null extra.
            """
            try:
                return self.__dep_map
            except AttributeError:
                self.__dep_map = self._filter_extras(self._build_dep_map())
            return self.__dep_map
    
        @staticmethod
        def _filter_extras(dm):
            """
            Given a mapping of extras to dependencies, strip off
            environment markers and filter out any dependencies
            not matching the markers.
            """
            for extra in list(filter(None, dm)):
                new_extra = extra
                reqs = dm.pop(extra)
                new_extra, _, marker = extra.partition(':')
                fails_marker = marker and (
                    invalid_marker(marker)
                    or not evaluate_marker(marker)
                )
                if fails_marker:
                    reqs = []
                new_extra = safe_extra(new_extra) or None
    
                dm.setdefault(new_extra, []).extend(reqs)
            return dm
    
        def _build_dep_map(self):
            dm = {}
            for name in 'requires.txt', 'depends.txt':
                for extra, reqs in split_sections(self._get_metadata(name)):
                    dm.setdefault(extra, []).extend(parse_requirements(reqs))
            return dm
    
        def requires(self, extras=()):
            """List of Requirements needed for this distro if `extras` are used"""
            dm = self._dep_map
            deps = []
            deps.extend(dm.get(None, ()))
            for ext in extras:
                try:
                    deps.extend(dm[safe_extra(ext)])
                except KeyError:
                    raise UnknownExtra(
                        "%s has no such extra feature %r" % (self, ext)
                    )
            return deps
    
        def _get_metadata(self, name):
            if self.has_metadata(name):
                for line in self.get_metadata_lines(name):
                    yield line
    
        def activate(self, path=None, replace=False):
            """Ensure distribution is importable on `path` (default=sys.path)"""
            if path is None:
                path = sys.path
            self.insert_on(path, replace=replace)
            if path is sys.path:
                fixup_namespace_packages(self.location)
                for pkg in self._get_metadata('namespace_packages.txt'):
                    if pkg in sys.modules:
                        declare_namespace(pkg)
    
        def egg_name(self):
            """Return what this distribution's standard .egg filename should be"""
            filename = "%s-%s-py%s" % (
                to_filename(self.project_name), to_filename(self.version),
                self.py_version or PY_MAJOR
            )
    
            if self.platform:
                filename += '-' + self.platform
            return filename
    
        def __repr__(self):
            if self.location:
                return "%s (%s)" % (self, self.location)
            else:
                return str(self)
    
        def __str__(self):
            try:
                version = getattr(self, 'version', None)
            except ValueError:
                version = None
            version = version or "[unknown version]"
            return "%s %s" % (self.project_name, version)
    
        def __getattr__(self, attr):
            """Delegate all unrecognized public attributes to .metadata provider"""
            if attr.startswith('_'):
                raise AttributeError(attr)
            return getattr(self._provider, attr)
    
        def __dir__(self):
            return list(
                set(super(Distribution, self).__dir__())
                | set(
                    attr for attr in self._provider.__dir__()
                    if not attr.startswith('_')
                )
            )
    
        if not hasattr(object, '__dir__'):
            # python 2.7 not supported
            del __dir__
    
        @classmethod
        def from_filename(cls, filename, metadata=None, **kw):
            return cls.from_location(
                _normalize_cached(filename), os.path.basename(filename), metadata,
                **kw
            )
    
        def as_requirement(self):
            """Return a ``Requirement`` that matches this distribution exactly"""
            if isinstance(self.parsed_version, packaging.version.Version):
                spec = "%s==%s" % (self.project_name, self.parsed_version)
            else:
                spec = "%s===%s" % (self.project_name, self.parsed_version)
    
            return Requirement.parse(spec)
    
        def load_entry_point(self, group, name):
            """Return the `name` entry point of `group` or raise ImportError"""
            ep = self.get_entry_info(group, name)
            if ep is None:
                raise ImportError("Entry point %r not found" % ((group, name),))
            return ep.load()
    
        def get_entry_map(self, group=None):
            """Return the entry point map for `group`, or the full entry map"""
            try:
                ep_map = self._ep_map
            except AttributeError:
                ep_map = self._ep_map = EntryPoint.parse_map(
                    self._get_metadata('entry_points.txt'), self
                )
            if group is not None:
                return ep_map.get(group, {})
            return ep_map
    
        def get_entry_info(self, group, name):
            """Return the EntryPoint object for `group`+`name`, or ``None``"""
            return self.get_entry_map(group).get(name)
    
        def insert_on(self, path, loc=None, replace=False):
            """Ensure self.location is on path
    
            If replace=False (default):
                - If location is already in path anywhere, do nothing.
                - Else:
                  - If it's an egg and its parent directory is on path,
                    insert just ahead of the parent.
                  - Else: add to the end of path.
            If replace=True:
                - If location is already on path anywhere (not eggs)
                  or higher priority than its parent (eggs)
                  do nothing.
                - Else:
                  - If it's an egg and its parent directory is on path,
                    insert just ahead of the parent,
                    removing any lower-priority entries.
                  - Else: add it to the front of path.
            """
    
            loc = loc or self.location
            if not loc:
                return
    
            nloc = _normalize_cached(loc)
            bdir = os.path.dirname(nloc)
            npath = [(p and _normalize_cached(p) or p) for p in path]
    
            for p, item in enumerate(npath):
                if item == nloc:
                    if replace:
                        break
                    else:
                        # don't modify path (even removing duplicates) if
                        # found and not replace
                        return
                elif item == bdir and self.precedence == EGG_DIST:
                    # if it's an .egg, give it precedence over its directory
                    # UNLESS it's already been added to sys.path and replace=False
                    if (not replace) and nloc in npath[p:]:
                        return
                    if path is sys.path:
                        self.check_version_conflict()
                    path.insert(p, loc)
                    npath.insert(p, nloc)
                    break
            else:
                if path is sys.path:
                    self.check_version_conflict()
                if replace:
                    path.insert(0, loc)
                else:
                    path.append(loc)
                return
    
            # p is the spot where we found or inserted loc; now remove duplicates
            while True:
                try:
                    np = npath.index(nloc, p + 1)
                except ValueError:
                    break
                else:
                    del npath[np], path[np]
                    # ha!
                    p = np
    
            return
    
        def check_version_conflict(self):
            if self.key == 'setuptools':
                # ignore the inevitable setuptools self-conflicts  :(
                return
    
            nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
            loc = normalize_path(self.location)
            for modname in self._get_metadata('top_level.txt'):
                if (modname not in sys.modules or modname in nsp
                        or modname in _namespace_packages):
                    continue
                if modname in ('pkg_resources', 'setuptools', 'site'):
                    continue
                fn = getattr(sys.modules[modname], '__file__', None)
                if fn and (normalize_path(fn).startswith(loc) or
                           fn.startswith(self.location)):
                    continue
                issue_warning(
                    "Module %s was already imported from %s, but %s is being added"
                    " to sys.path" % (modname, fn, self.location),
                )
    
        def has_version(self):
            try:
                self.version
            except ValueError:
                issue_warning("Unbuilt egg for " + repr(self))
                return False
            return True
    
        def clone(self, **kw):
            """Copy this distribution, substituting in any changed keyword args"""
            names = 'project_name version py_version platform location precedence'
            for attr in names.split():
                kw.setdefault(attr, getattr(self, attr, None))
            kw.setdefault('metadata', self._provider)
            return self.__class__(**kw)
    
        @property
        def extras(self):
            return [dep for dep in self._dep_map if dep]
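
    # Usage sketch (illustrative; the filename is hypothetical): building a
    # Distribution from an egg filename and turning it back into a
    # requirement.
    def _example_distribution():
        dist = Distribution.from_filename('FooBar-1.2-py2.7.egg')
        # EGG_NAME parsing yields project_name='FooBar', version='1.2'
        return dist.as_requirement()  # == Requirement.parse('FooBar==1.2')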
    
    
    class EggInfoDistribution(Distribution):
        def _reload_version(self):
            """
            Packages installed by distutils (e.g. numpy or scipy)
            use an old safe_version, so their version numbers can get
            mangled when converted to filenames (e.g., 1.11.0.dev0+2329eae
            to 1.11.0.dev0_2329eae). Such distributions will not be parsed
            properly downstream by Distribution and safe_version, so take
            an extra step and try to get the version number from the
            metadata file itself instead of the filename.
            """
            md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
            if md_version:
                self._version = md_version
            return self
    
    
    class DistInfoDistribution(Distribution):
        """
        Wrap an actual or potential sys.path entry
        w/metadata, .dist-info style.
        """
        PKG_INFO = 'METADATA'
        EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
    
        @property
        def _parsed_pkg_info(self):
            """Parse and cache metadata"""
            try:
                return self._pkg_info
            except AttributeError:
                metadata = self.get_metadata(self.PKG_INFO)
                self._pkg_info = email.parser.Parser().parsestr(metadata)
                return self._pkg_info
    
        @property
        def _dep_map(self):
            try:
                return self.__dep_map
            except AttributeError:
                self.__dep_map = self._compute_dependencies()
                return self.__dep_map
    
        def _compute_dependencies(self):
            """Recompute this distribution's dependencies."""
            dm = self.__dep_map = {None: []}
    
            reqs = []
            # Including any condition expressions
            for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
                reqs.extend(parse_requirements(req))
    
            def reqs_for_extra(extra):
                for req in reqs:
                    if not req.marker or req.marker.evaluate({'extra': extra}):
                        yield req
    
            common = frozenset(reqs_for_extra(None))
            dm[None].extend(common)
    
            for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
                s_extra = safe_extra(extra.strip())
                dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
    
            return dm
    
    
    _distributionImpl = {
        '.egg': Distribution,
        '.egg-info': EggInfoDistribution,
        '.dist-info': DistInfoDistribution,
    }
    
    
    def issue_warning(*args, **kw):
        level = 1
        g = globals()
        try:
            # find the first stack frame that is *not* code in
            # the pkg_resources module, to use for the warning
            while sys._getframe(level).f_globals is g:
                level += 1
        except ValueError:
            pass
        warnings.warn(stacklevel=level + 1, *args, **kw)
    
    
    class RequirementParseError(ValueError):
        def __str__(self):
            return ' '.join(self.args)
    
    
    def parse_requirements(strs):
        """Yield ``Requirement`` objects for each specification in `strs`
    
        `strs` must be a string, or a (possibly-nested) iterable thereof.
        """
        # create a steppable iterator, so we can handle \-continuations
        lines = iter(yield_lines(strs))
    
        for line in lines:
            # Drop comments -- a hash without a space may be in a URL.
            if ' #' in line:
                line = line[:line.find(' #')]
            # If there is a line continuation, drop it, and append the next line.
            if line.endswith('\\'):
                line = line[:-2].strip()
                try:
                    line += next(lines)
                except StopIteration:
                    return
            yield Requirement(line)
    
    
    class Requirement(packaging.requirements.Requirement):
        def __init__(self, requirement_string):
            """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
            try:
                super(Requirement, self).__init__(requirement_string)
            except packaging.requirements.InvalidRequirement as e:
                raise RequirementParseError(str(e))
            self.unsafe_name = self.name
            project_name = safe_name(self.name)
            self.project_name, self.key = project_name, project_name.lower()
            self.specs = [
                (spec.operator, spec.version) for spec in self.specifier]
            self.extras = tuple(map(safe_extra, self.extras))
            self.hashCmp = (
                self.key,
                self.specifier,
                frozenset(self.extras),
                str(self.marker) if self.marker else None,
            )
            self.__hash = hash(self.hashCmp)
    
        def __eq__(self, other):
            return (
                isinstance(other, Requirement) and
                self.hashCmp == other.hashCmp
            )
    
        def __ne__(self, other):
            return not self == other
    
        def __contains__(self, item):
            if isinstance(item, Distribution):
                if item.key != self.key:
                    return False
    
                item = item.version
    
            # Allow prereleases always in order to match the previous behavior of
            # this method. In the future this should be smarter and follow PEP 440
            # more accurately.
            return self.specifier.contains(item, prereleases=True)
    
        def __hash__(self):
            return self.__hash
    
        def __repr__(self):
            return "Requirement.parse(%r)" % str(self)
    
        @staticmethod
        def parse(s):
            req, = parse_requirements(s)
            return req
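
    # Usage sketch (illustrative): containment checks accept a version string
    # or a Distribution whose key matches.
    def _example_requirement():
        req = Requirement.parse('FooBar>=1.0,<2.0')
        assert '1.2' in req
        assert '2.1' not in req
        return req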
    
    
    def _always_object(classes):
        """
        Ensure object appears in the mro even
        for old-style classes.
        """
        if object not in classes:
            return classes + (object,)
        return classes
    
    
    def _find_adapter(registry, ob):
        """Return an adapter factory for `ob` from `registry`"""
        types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
        for t in types:
            if t in registry:
                return registry[t]
    
    
    def ensure_directory(path):
        """Ensure that the parent directory of `path` exists"""
        dirname = os.path.dirname(path)
        py31compat.makedirs(dirname, exist_ok=True)
    
    
    def _bypass_ensure_directory(path):
        """Sandbox-bypassing version of ensure_directory()"""
        if not WRITE_SUPPORT:
            raise IOError('"os.mkdir" not supported on this platform.')
        dirname, filename = split(path)
        if dirname and filename and not isdir(dirname):
            _bypass_ensure_directory(dirname)
            mkdir(dirname, 0o755)
    
    
    def split_sections(s):
        """Split a string or iterable thereof into (section, content) pairs
    
        Each ``section`` is a stripped version of the section header ("[section]")
        and each ``content`` is a list of stripped lines excluding blank lines and
        comment-only lines.  If there are any such lines before the first section
        header, they're returned in a first ``section`` of ``None``.
        """
        section = None
        content = []
        for line in yield_lines(s):
            if line.startswith("["):
                if line.endswith("]"):
                    if section or content:
                        yield section, content
                    section = line[1:-1].strip()
                    content = []
                else:
                    raise ValueError("Invalid section heading", line)
            else:
                content.append(line)
    
        # wrap up last segment
        yield section, content
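
    # Example (illustrative): splitting requires.txt-style content into
    # (section, lines) pairs:
    #
    #     list(split_sections("a\n[extra]\nb\nc"))
    #     # -> [(None, ['a']), ('extra', ['b', 'c'])]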
    
    
    def _mkstemp(*args, **kw):
        old_open = os.open
        try:
            # temporarily bypass sandboxing
            os.open = os_open
            return tempfile.mkstemp(*args, **kw)
        finally:
            # and then put it back
            os.open = old_open
    
    
    # Silence the PEP440Warning by default, so that end users don't get hit by it
    # randomly just because they use pkg_resources. We want to append the rule
    # because we want earlier uses of filterwarnings to take precedence over this
    # one.
    warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
    
    
    # from jaraco.functools 1.3
    def _call_aside(f, *args, **kwargs):
        f(*args, **kwargs)
        return f
    
    
    @_call_aside
    def _initialize(g=globals()):
        "Set up global resource manager (deliberately not state-saved)"
        manager = ResourceManager()
        g['_manager'] = manager
        g.update(
            (name, getattr(manager, name))
            for name in dir(manager)
            if not name.startswith('_')
        )
    
    
    @_call_aside
    def _initialize_master_working_set():
        """
        Prepare the master working set and make the ``require()``
        API available.
    
        This function has explicit effects on the global state
        of pkg_resources. It is intended to be invoked once at
        the initialization of this module.
    
        Invocation by other packages is unsupported and done
        at their own risk.
        """
        working_set = WorkingSet._build_master()
        _declare_state('object', working_set=working_set)
    
        require = working_set.require
        iter_entry_points = working_set.iter_entry_points
        add_activation_listener = working_set.subscribe
        run_script = working_set.run_script
        # backward compatibility
        run_main = run_script
        # Activate all distributions already on sys.path with replace=False and
        # ensure that all distributions added to the working set in the future
        # (e.g. by calling ``require()``) will get activated as well,
        # with higher priority (replace=True).
        tuple(
            dist.activate(replace=False)
            for dist in working_set
        )
        add_activation_listener(
            lambda dist: dist.activate(replace=True),
            existing=False,
        )
        working_set.entries = []
        # match order
        list(map(working_set.add_entry, sys.path))
        globals().update(locals())
    usr/lib/python3.6/site-packages/firewall/core/__init__.py
    usr/lib/python3.6/site-packages/firewall/config/__init__.py
    # -*- coding: utf-8 -*-
    #
    # Copyright (C) 2007-2016 Red Hat, Inc.
    # Authors:
    # Thomas Woerner <twoerner@redhat.com>
    #
    # This program is free software; you can redistribute it and/or modify
    # it under the terms of the GNU General Public License as published by
    # the Free Software Foundation; either version 2 of the License, or
    # (at your option) any later version.
    #
    # This program is distributed in the hope that it will be useful,
    # but WITHOUT ANY WARRANTY; without even the implied warranty of
    # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    # GNU General Public License for more details.
    #
    # You should have received a copy of the GNU General Public License
    # along with this program.  If not, see <http://www.gnu.org/licenses/>.
    #
    
    from __future__ import absolute_import
    
    # translation
    import locale
    try:
        locale.setlocale(locale.LC_ALL, "")
    except locale.Error:
        import os
        os.environ['LC_ALL'] = 'C'
        locale.setlocale(locale.LC_ALL, "")
    
    DOMAIN = 'firewalld'
    import gettext
    gettext.install(domain=DOMAIN)
    
    from . import dbus # noqa: F401
    
    # configuration
    DAEMON_NAME = 'firewalld'
    CONFIG_NAME = 'firewall-config'
    APPLET_NAME = 'firewall-applet'
    DATADIR = '/usr/share/' + DAEMON_NAME
    CONFIG_GLADE_NAME = CONFIG_NAME + '.glade'
    COPYRIGHT = '(C) 2010-2017 Red Hat, Inc.'
    VERSION = '0.9.11'
    AUTHORS = [
        "Thomas Woerner ",
        "Jiri Popelka ",
        "Eric Garver ",
        ]
    LICENSE = gettext.gettext(
        "This program is free software; you can redistribute it and/or modify "
        "it under the terms of the GNU General Public License as published by "
        "the Free Software Foundation; either version 2 of the License, or "
        "(at your option) any later version.\n"
        "\n"
        "This program is distributed in the hope that it will be useful, "
        "but WITHOUT ANY WARRANTY; without even the implied warranty of "
        "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the "
        "GNU General Public License for more details.\n"
        "\n"
        "You should have received a copy of the GNU General Public License "
        "along with this program.  If not, see .")
    WEBSITE = 'http://www.firewalld.org'
    
    def set_system_config_paths(path):
        global ETC_FIREWALLD, FIREWALLD_CONF, ETC_FIREWALLD_ZONES, \
               ETC_FIREWALLD_SERVICES, ETC_FIREWALLD_ICMPTYPES, \
               ETC_FIREWALLD_IPSETS, ETC_FIREWALLD_HELPERS, \
               FIREWALLD_DIRECT, LOCKDOWN_WHITELIST, ETC_FIREWALLD_POLICIES
        ETC_FIREWALLD = path
        FIREWALLD_CONF = path + '/firewalld.conf'
        ETC_FIREWALLD_ZONES = path + '/zones'
        ETC_FIREWALLD_SERVICES = path + '/services'
        ETC_FIREWALLD_ICMPTYPES = path + '/icmptypes'
        ETC_FIREWALLD_IPSETS = path + '/ipsets'
        ETC_FIREWALLD_HELPERS = path + '/helpers'
        ETC_FIREWALLD_POLICIES = path + '/policies'
        FIREWALLD_DIRECT = path + '/direct.xml'
        LOCKDOWN_WHITELIST = path + '/lockdown-whitelist.xml'
    set_system_config_paths('/etc/firewalld')
    
    def set_default_config_paths(path):
        global USR_LIB_FIREWALLD, FIREWALLD_ZONES, FIREWALLD_SERVICES, \
               FIREWALLD_ICMPTYPES, FIREWALLD_IPSETS, FIREWALLD_HELPERS, \
               FIREWALLD_POLICIES
        USR_LIB_FIREWALLD = path
        FIREWALLD_ZONES = path + '/zones'
        FIREWALLD_SERVICES = path + '/services'
        FIREWALLD_ICMPTYPES = path + '/icmptypes'
        FIREWALLD_IPSETS = path + '/ipsets'
        FIREWALLD_HELPERS = path + '/helpers'
        FIREWALLD_POLICIES = path + '/policies'
    set_default_config_paths('/usr/lib/firewalld')
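
    # Illustrative note (not part of the original file): tests or sandboxed
    # runs can point both trees at alternate roots, for example:
    #
    #     set_system_config_paths('/tmp/test-firewalld/etc')
    #     set_default_config_paths('/tmp/test-firewalld/usr')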
    
    FIREWALLD_LOGFILE = '/var/log/firewalld'
    
    FIREWALLD_PIDFILE = "/var/run/firewalld.pid"
    
    FIREWALLD_TEMPDIR = '/run/firewalld'
    
    SYSCONFIGDIR = '/etc/sysconfig'
    IFCFGDIR = "/etc/sysconfig/network-scripts"
    
    SYSCTL_CONFIG = '/etc/sysctl.conf'
    
    # commands used by backends
    COMMANDS = {
        "ipv4":         "/usr/sbin/iptables",
        "ipv4-restore": "/usr/sbin/iptables-restore",
        "ipv6":         "/usr/sbin/ip6tables",
        "ipv6-restore": "/usr/sbin/ip6tables-restore",
        "eb":           "/usr/sbin/ebtables",
        "eb-restore":   "/usr/sbin/ebtables-restore",
        "ipset":        "/usr/sbin/ipset",
        "modprobe":     "/sbin/modprobe",
        "rmmod":        "/sbin/rmmod",
    }
    
    LOG_DENIED_VALUES = [ "all", "unicast", "broadcast", "multicast", "off" ]
    AUTOMATIC_HELPERS_VALUES = [ "yes", "no", "system" ]
    FIREWALL_BACKEND_VALUES = [ "nftables", "iptables" ]
    
    # fallbacks: will be overloaded by firewalld.conf
    FALLBACK_ZONE = "public"
    FALLBACK_MINIMAL_MARK = 100
    FALLBACK_CLEANUP_ON_EXIT = True
    FALLBACK_CLEANUP_MODULES_ON_EXIT = True
    FALLBACK_LOCKDOWN = False
    FALLBACK_IPV6_RPFILTER = True
    FALLBACK_INDIVIDUAL_CALLS = False
    FALLBACK_LOG_DENIED = "off"
    FALLBACK_AUTOMATIC_HELPERS = "no"
    FALLBACK_FIREWALL_BACKEND = "nftables"
    FALLBACK_FLUSH_ALL_ON_RELOAD = True
    FALLBACK_RFC3964_IPV4 = True
    FALLBACK_ALLOW_ZONE_DRIFTING = True
    usr/lib/python3.6/site-packages/dnfpluginscore/__init__.py
    # Copyright (C) 2014  Red Hat, Inc.
    #
    # This copyrighted material is made available to anyone wishing to use,
    # modify, copy, or redistribute it subject to the terms and conditions of
    # the GNU General Public License v.2, or (at your option) any later version.
    # This program is distributed in the hope that it will be useful, but WITHOUT
    # ANY WARRANTY expressed or implied, including the implied warranties of
    # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
    # Public License for more details.  You should have received a copy of the
    # GNU General Public License along with this program; if not, write to the
    # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
    # 02110-1301, USA.  Any Red Hat trademarks that are incorporated in the
    # source code or documentation are not subject to the GNU General Public
    # License and may only be used or replicated with the express permission of
    # Red Hat, Inc.
    #
    
    """ Common code for dnf-plugins-core"""
    from __future__ import absolute_import
    from __future__ import unicode_literals
    
    import dnf.exceptions
    import logging
    
    _, P_ = dnf.i18n.translation('dnf-plugins-core')
    logger = logging.getLogger('dnf.plugin')
    rpm_logger = logging.getLogger('dnf.rpm')
    
    
    usr/lib/python3.6/site-packages/firewall/server/__init__.py
    usr/lib64/python3.6/site-packages/lxml/html/__init__.py
    # Copyright (c) 2004 Ian Bicking. All rights reserved.
    #
    # Redistribution and use in source and binary forms, with or without
    # modification, are permitted provided that the following conditions are
    # met:
    #
    # 1. Redistributions of source code must retain the above copyright
    # notice, this list of conditions and the following disclaimer.
    #
    # 2. Redistributions in binary form must reproduce the above copyright
    # notice, this list of conditions and the following disclaimer in
    # the documentation and/or other materials provided with the
    # distribution.
    #
    # 3. Neither the name of Ian Bicking nor the names of its contributors may
    # be used to endorse or promote products derived from this software
    # without specific prior written permission.
    #
    # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IAN BICKING OR
    # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
    # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
    # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    
    """The ``lxml.html`` tool set for HTML handling.
    """
    
    from __future__ import absolute_import
    
    __all__ = [
        'document_fromstring', 'fragment_fromstring', 'fragments_fromstring', 'fromstring',
        'tostring', 'Element', 'defs', 'open_in_browser', 'submit_form',
        'find_rel_links', 'find_class', 'make_links_absolute',
        'resolve_base_href', 'iterlinks', 'rewrite_links', 'open_in_browser', 'parse']
    
    
    import copy
    import sys
    import re
    from functools import partial
    
    try:
        # while unnecessary, importing from 'collections.abc' is the right way to do it
        from collections.abc import MutableMapping, MutableSet
    except ImportError:
        from collections import MutableMapping, MutableSet
    
    from .. import etree
    from . import defs
    from ._setmixin import SetMixin
    
    try:
        from urlparse import urljoin
    except ImportError:
        # Python 3
        from urllib.parse import urljoin
    
    try:
        unicode
    except NameError:
        # Python 3
        unicode = str
    try:
        basestring
    except NameError:
        # Python 3
        basestring = (str, bytes)
    
    
    def __fix_docstring(s):
        if not s:
            return s
        if sys.version_info[0] >= 3:
            sub = re.compile(r"^(\s*)u'", re.M).sub
        else:
            sub = re.compile(r"^(\s*)b'", re.M).sub
        return sub(r"\1'", s)
    
    
    XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
    
    _rel_links_xpath = etree.XPath("descendant-or-self::a[@rel]|descendant-or-self::x:a[@rel]",
                                   namespaces={'x':XHTML_NAMESPACE})
    _options_xpath = etree.XPath("descendant-or-self::option|descendant-or-self::x:option",
                                 namespaces={'x':XHTML_NAMESPACE})
    _forms_xpath = etree.XPath("descendant-or-self::form|descendant-or-self::x:form",
                               namespaces={'x':XHTML_NAMESPACE})
    #_class_xpath = etree.XPath(r"descendant-or-self::*[regexp:match(@class, concat('\b', $class_name, '\b'))]", {'regexp': 'http://exslt.org/regular-expressions'})
    _class_xpath = etree.XPath("descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), concat(' ', $class_name, ' '))]")
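    # The concat(' ', normalize-space(@class), ' ') padding above restricts
    # matches to whole class tokens: ' large paragraph ' contains ' large '
    # but not ' para '.  Roughly the same test in Python (illustration only):
    #
    #     def _has_class_token(class_attr, name):
    #         return ' %s ' % name in ' %s ' % ' '.join(class_attr.split())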
    _id_xpath = etree.XPath("descendant-or-self::*[@id=$id]")
    _collect_string_content = etree.XPath("string()")
    _iter_css_urls = re.compile(r'url\(('+'["][^"]*["]|'+"['][^']*[']|"+r'[^)]*)\)', re.I).finditer
    _iter_css_imports = re.compile(r'@import "(.*?)"').finditer
    _label_xpath = etree.XPath("//label[@for=$id]|//x:label[@for=$id]",
                               namespaces={'x':XHTML_NAMESPACE})
    _archive_re = re.compile(r'[^ ]+')
    _parse_meta_refresh_url = re.compile(
        r'[^;=]*;\s*(?:url\s*=\s*)?(?P<url>.*)$', re.I).search
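    # For a (hypothetical) refresh value like '5; url=http://example.com/',
    # the 'url' group captures 'http://example.com/'; the 'url=' prefix is
    # optional, so '5; http://example.com/' parses the same way.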
    
    
    def _unquote_match(s, pos):
        if s[:1] == '"' and s[-1:] == '"' or s[:1] == "'" and s[-1:] == "'":
            return s[1:-1], pos+1
        else:
            return s, pos
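    # For illustration, _unquote_match('"style.css"', 4) returns
    # ('style.css', 5): the quotes are dropped and the position is shifted
    # past the opening quote; an unquoted value comes back unchanged.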
    
    
    def _transform_result(typ, result):
        """Convert the result back into the input type.
        """
        if issubclass(typ, bytes):
            return tostring(result, encoding='utf-8')
        elif issubclass(typ, unicode):
            return tostring(result, encoding='unicode')
        else:
            return result
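    # This lets helpers such as make_links_absolute() accept markup as bytes
    # or text and hand back the same type; _transform_result(bytes, doc)
    # re-serializes the tree as UTF-8 encoded bytes, for example.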
    
    
    def _nons(tag):
        if isinstance(tag, basestring):
            if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:
                return tag.split('}')[-1]
        return tag
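    # e.g. _nons('{http://www.w3.org/1999/xhtml}body') -> 'body'; tags in
    # other namespaces, and non-string tags like Comment, pass through
    # unchanged.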
    
    
    class Classes(MutableSet):
        """Provides access to an element's class attribute as a set-like collection.
        Usage::
    
            >>> el = fromstring('<p class="hidden large">Text</p>')
            >>> classes = el.classes  # or: classes = Classes(el.attrib)
            >>> classes |= ['block', 'paragraph']
            >>> el.get('class')
            'hidden large block paragraph'
            >>> classes.toggle('hidden')
            False
            >>> el.get('class')
            'large block paragraph'
            >>> classes -= ('some', 'classes', 'block')
            >>> el.get('class')
            'large paragraph'
        """
        def __init__(self, attributes):
            self._attributes = attributes
            self._get_class_value = partial(attributes.get, 'class', '')
    
        def add(self, value):
            """
            Add a class.
    
            This has no effect if the class is already present.
            """
            if not value or re.search(r'\s', value):
                raise ValueError("Invalid class name: %r" % value)
            classes = self._get_class_value().split()
            if value in classes:
                return
            classes.append(value)
            self._attributes['class'] = ' '.join(classes)
    
        def discard(self, value):
            """
            Remove a class if it is currently present.
    
            If the class is not present, do nothing.
            """
            if not value or re.search(r'\s', value):
                raise ValueError("Invalid class name: %r" % value)
            classes = [name for name in self._get_class_value().split()
                       if name != value]
            if classes:
                self._attributes['class'] = ' '.join(classes)
            elif 'class' in self._attributes:
                del self._attributes['class']
    
        def remove(self, value):
            """
            Remove a class; it must currently be present.
    
            If the class is not present, raise a KeyError.
            """
            if not value or re.search(r'\s', value):
                raise ValueError("Invalid class name: %r" % value)
            super(Classes, self).remove(value)
    
        def __contains__(self, name):
            classes = self._get_class_value()
            return name in classes and name in classes.split()
    
        def __iter__(self):
            return iter(self._get_class_value().split())
    
        def __len__(self):
            return len(self._get_class_value().split())
    
        # non-standard methods
    
        def update(self, values):
            """
            Add all names from 'values'.
            """
            classes = self._get_class_value().split()
            extended = False
            for value in values:
                if value not in classes:
                    classes.append(value)
                    extended = True
            if extended:
                self._attributes['class'] = ' '.join(classes)
    
        def toggle(self, value):
            """
            Add a class name if it isn't there yet, or remove it if it exists.
    
            Returns true if the class was added (and is now enabled) and
            false if it was removed (and is now disabled).
            """
            if not value or re.search(r'\s', value):
                raise ValueError("Invalid class name: %r" % value)
            classes = self._get_class_value().split()
            try:
                classes.remove(value)
                enabled = False
            except ValueError:
                classes.append(value)
                enabled = True
            if classes:
                self._attributes['class'] = ' '.join(classes)
            else:
                del self._attributes['class']
            return enabled
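    # Beyond the standard MutableSet API, update() adds several names with a
    # single write of the 'class' attribute; for a (hypothetical) element
    # 'el':
    #
    #     el.classes.update(['one', 'two'])  # attribute rewritten at most once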
    
    
    class HtmlMixin(object):
    
        def set(self, key, value=None):
            """set(self, key, value=None)
    
            Sets an element attribute.  If no value is provided, or if the value is None,
            creates a 'boolean' attribute without value, e.g. "<form novalidate></form>"
            for ``form.set('novalidate')``.
            """
            super(HtmlElement, self).set(key, value)

        @property
        def classes(self):
            """
            A set-like wrapper around the 'class' attribute.
            """
            return Classes(self.attrib)

        @classes.setter
        def classes(self, classes):
            assert isinstance(classes, Classes)  # only allow "el.classes |= ..." etc.
            value = classes._get_class_value()
            if value:
                self.set('class', value)
            elif self.get('class') is not None:
                del self.attrib['class']

        @property
        def base_url(self):
            """
            Returns the base URL, given when the page was parsed.

            Use with ``urlparse.urljoin(el.base_url, href)`` to get absolute URLs.
            """
            return self.getroottree().docinfo.URL

        @property
        def forms(self):
            """
            Return a list of all the forms.
            """
            return _forms_xpath(self)

        @property
        def body(self):
            """
            Return the <body> element.  Can be called from a child element to
            get the document's body.
            """
            return self.xpath('//body|//x:body', namespaces={'x':XHTML_NAMESPACE})[0]

        @property
        def head(self):
            """
            Returns the <head> element.  Can be called from a child element to
            get the document's head.
            """
            return self.xpath('//head|//x:head', namespaces={'x':XHTML_NAMESPACE})[0]

        @property
        def label(self):
            """
            Get or set any <label>